hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
57609a20c920831d58c649bbf6f27acb000fafc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define N 2048
#define nthreads 512
__global__ void matrix_mul (int* A, int*B,int*C,int size){
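// One thread per output element: i is the flat index into C, rowidx points at
// the start of row i/size in row-major A, and colidx+k*size walks column
// i%size of B.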
int i=threadIdx.x+(blockIdx.x*blockDim.x);
int rowidx= (i/size)*size;
int colidx= i%size;
int acc=0;
int k;
for(k=0; k<size;k++){
acc+=A[rowidx+k]*B[colidx+k*size];
}
C[i]=acc;
}
int main() {
int*A=(int*)malloc(N*N*sizeof(int));
int* dev_A;
int*B=(int*)malloc(N*N*sizeof(int));
int* dev_B;
int*C=(int*)malloc(N*N*sizeof(int));
int* dev_C;
int i;
for(i=0;i<N*N;i++){
A[i]=i;
}
for(i=0;i<N*N;i++){
B[i]=i;
}
// allocate space for all three matrices
hipMalloc( (void**)&dev_A, N*N*sizeof(int));
hipMalloc( (void**)&dev_B, N*N*sizeof(int) );
hipMalloc( (void**)&dev_C, N*N*sizeof(int) );
//send data to device
hipMemcpy( dev_A, A, N*N*sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( dev_B, B, N*N*sizeof(int), hipMemcpyHostToDevice );
// launch matrix_mul kernel
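// Note: this launch covers every element only because N*N (2048*2048) divides
// evenly by nthreads (512), giving 8192 full blocks; other sizes would need a
// bounds check in the kernel.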
hipLaunchKernelGGL(( matrix_mul), dim3((N*N)/nthreads), dim3(nthreads) , 0, 0, dev_A, dev_B, dev_C, N);
// copy results
hipMemcpy( C, dev_C, N*N*sizeof(int), hipMemcpyDeviceToHost );
/* for(i=0;i<N*N;i++){
if(i%N==0 && i!=0)printf("\n");
printf("%d ", C[i]);
}
printf("\n"); */
free(A); free(B);free(C);
hipFree( dev_A ); hipFree( dev_B );hipFree( dev_C );
return 0;
}
| 57609a20c920831d58c649bbf6f27acb000fafc7.cu | #include <stdio.h>
#include <stdlib.h>
#define N 2048
#define nthreads 512
__global__ void matrix_mul (int* A, int*B,int*C,int size){
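// One thread per output element: i is the flat index into C, rowidx points at
// the start of row i/size in row-major A, and colidx+k*size walks column
// i%size of B.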
int i=threadIdx.x+(blockIdx.x*blockDim.x);
int rowidx= (i/size)*size;
int colidx= i%size;
int acc=0;
int k;
for(k=0; k<size;k++){
acc+=A[rowidx+k]*B[colidx+k*size];
}
C[i]=acc;
}
int main() {
int*A=(int*)malloc(N*N*sizeof(int));
int* dev_A;
int*B=(int*)malloc(N*N*sizeof(int));
int* dev_B;
int*C=(int*)malloc(N*N*sizeof(int));
int* dev_C;
int i;
for(i=0;i<N*N;i++){
A[i]=i;
}
for(i=0;i<N*N;i++){
B[i]=i;
}
// allocate space for all three matrices
cudaMalloc( (void**)&dev_A, N*N*sizeof(int));
cudaMalloc( (void**)&dev_B, N*N*sizeof(int) );
cudaMalloc( (void**)&dev_C, N*N*sizeof(int) );
//send data to device
cudaMemcpy( dev_A, A, N*N*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( dev_B, B, N*N*sizeof(int), cudaMemcpyHostToDevice );
// launch matrix_mul kernel
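// Note: this launch covers every element only because N*N (2048*2048) divides
// evenly by nthreads (512), giving 8192 full blocks; other sizes would need a
// bounds check in the kernel.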
matrix_mul<<< (N*N)/nthreads, nthreads >>>(dev_A, dev_B, dev_C, N);
// copy results
cudaMemcpy( C, dev_C, N*N*sizeof(int), cudaMemcpyDeviceToHost );
/* for(i=0;i<N*N;i++){
if(i%N==0 && i!=0)printf("\n");
printf("%d ", C[i]);
}
printf("\n"); */
free(A); free(B);free(C);
cudaFree( dev_A ); cudaFree( dev_B );cudaFree( dev_C );
return 0;
}
|
c8aba420e521e4324c4b8d816ffc9fc6193e693b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#include <optix.h>
#if (OPTIX_VERSION < 70000)
#include <optixu/optixu_aabb_namespace.h>
#include <optixu/optixu_math_namespace.h>
#include <optixu/optixu_vector_types.h>
using namespace optix;
rtDeclareVariable (float3, p, , );
rtDeclareVariable (float3, ex, , );
rtDeclareVariable (float3, ey, , );
rtDeclareVariable (float3, n, , );
rtDeclareVariable (float, eu, , );
rtDeclareVariable (float, ev, , );
rtDeclareVariable (float, a, , );
rtDeclareVariable (float3, texcoord, attribute texcoord, );
rtDeclareVariable (float3, geometric_normal, attribute geometric_normal, );
rtDeclareVariable (float3, shading_normal, attribute shading_normal, );
rtDeclareVariable (float, surface_area, attribute surface_area, );
rtDeclareVariable (float3, dPdu, attribute dPdu, );
rtDeclareVariable (float3, dPdv, attribute dPdv, );
rtDeclareVariable (optix::Ray, ray, rtCurrentRay, );
RT_PROGRAM void intersect (void)
{
float dn = dot(ray.direction, n);
float en = dot(p - ray.origin, n);
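// Ray/plane intersection: solving dot(origin + t*direction - p, n) = 0 for t
// gives t = en/dn; the dn*en > 0 test keeps only hits in front of the ray
// (t > 0) without dividing first.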
if (dn * en > 0) {
float t = en / dn;
float3 h = (ray.origin + ray.direction * t) - p;
float dx = dot(h, ex) * eu;
float dy = dot(h, ey) * ev;
if (dx >= 0 && dx < 1.0f && dy >= 0 && dy < 1.0f && rtPotentialIntersection(t)) {
shading_normal = geometric_normal = n;
texcoord = make_float3(dot (h, ex) * eu, dot (h, ey) * ev, 0.0f);
dPdu = ey;
dPdv = ex;
surface_area = a;
rtReportIntersection(0);
}
}
}
RT_PROGRAM void bounds (int, float result[6])
{
const float3 p00 = p;
const float3 p01 = p + ex;
const float3 p10 = p + ey;
const float3 p11 = p + ex + ey;
const float area = length(cross(ex, ey));
optix::Aabb* aabb = reinterpret_cast<optix::Aabb*>(result);
if (area > 0.0f && !isinf(area)) {
aabb->m_min = fminf (fminf (p00, p01), fminf (p10, p11));
aabb->m_max = fmaxf (fmaxf (p00, p01), fmaxf (p10, p11));
} else {
aabb->invalidate();
}
}
#else //#if (OPTIX_VERSION < 70000)
#include "wrapper.h"
#include "rend_lib.h"
#include "render_params.h"
extern "C" __device__
void __direct_callable__quad_shaderglobals (const unsigned int idx,
const float t_hit,
const float3 ray_origin,
const float3 ray_direction,
ShaderGlobals *sg)
{
const GenericData *g_data = reinterpret_cast<const GenericData *>(optixGetSbtDataPointer());
const QuadParams *g_quads = reinterpret_cast<const QuadParams *>(g_data->data);
const QuadParams &quad = g_quads[idx];
const float3 P = ray_origin + t_hit * ray_direction;
float3 h = P - quad.p;
sg->N = sg->Ng = quad.n;
sg->u = dot (h, quad.ex) * quad.eu;
sg->v = dot (h, quad.ey) * quad.ev;
sg->dPdu = quad.ey;
sg->dPdv = quad.ex;
sg->surfacearea = quad.a;
sg->shaderID = quad.shaderID;
}
extern "C" __global__
void __intersection__quad ()
{
const GenericData *g_data = reinterpret_cast<const GenericData *>(optixGetSbtDataPointer());
const QuadParams *g_quads = reinterpret_cast<const QuadParams *>(g_data->data);
const unsigned int idx = optixGetPrimitiveIndex();
const QuadParams &quad = g_quads[idx];
const float3 ray_origin = optixGetObjectRayOrigin();
const float3 ray_direction = optixGetObjectRayDirection();
float dn = dot(ray_direction, quad.n);
float en = dot(quad.p - ray_origin, quad.n);
if (dn * en > 0) {
float t = en / dn;
float3 h = (ray_origin + ray_direction * t) - quad.p;
float dx = dot(h, quad.ex) * quad.eu;
float dy = dot(h, quad.ey) * quad.ev;
if (dx >= 0 && dx < 1.0f && dy >= 0 && dy < 1.0f && t < optixGetRayTmax())
optixReportIntersection (t, RAYTRACER_HIT_QUAD);
}
}
#endif //#if (OPTIX_VERSION < 70000)
| c8aba420e521e4324c4b8d816ffc9fc6193e693b.cu | // Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#include <optix.h>
#if (OPTIX_VERSION < 70000)
#include <optixu/optixu_aabb_namespace.h>
#include <optixu/optixu_math_namespace.h>
#include <optixu/optixu_vector_types.h>
using namespace optix;
rtDeclareVariable (float3, p, , );
rtDeclareVariable (float3, ex, , );
rtDeclareVariable (float3, ey, , );
rtDeclareVariable (float3, n, , );
rtDeclareVariable (float, eu, , );
rtDeclareVariable (float, ev, , );
rtDeclareVariable (float, a, , );
rtDeclareVariable (float3, texcoord, attribute texcoord, );
rtDeclareVariable (float3, geometric_normal, attribute geometric_normal, );
rtDeclareVariable (float3, shading_normal, attribute shading_normal, );
rtDeclareVariable (float, surface_area, attribute surface_area, );
rtDeclareVariable (float3, dPdu, attribute dPdu, );
rtDeclareVariable (float3, dPdv, attribute dPdv, );
rtDeclareVariable (optix::Ray, ray, rtCurrentRay, );
RT_PROGRAM void intersect (void)
{
float dn = dot(ray.direction, n);
float en = dot(p - ray.origin, n);
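// Ray/plane intersection: solving dot(origin + t*direction - p, n) = 0 for t
// gives t = en/dn; the dn*en > 0 test keeps only hits in front of the ray
// (t > 0) without dividing first.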
if (dn * en > 0) {
float t = en / dn;
float3 h = (ray.origin + ray.direction * t) - p;
float dx = dot(h, ex) * eu;
float dy = dot(h, ey) * ev;
if (dx >= 0 && dx < 1.0f && dy >= 0 && dy < 1.0f && rtPotentialIntersection(t)) {
shading_normal = geometric_normal = n;
texcoord = make_float3(dot (h, ex) * eu, dot (h, ey) * ev, 0.0f);
dPdu = ey;
dPdv = ex;
surface_area = a;
rtReportIntersection(0);
}
}
}
RT_PROGRAM void bounds (int, float result[6])
{
const float3 p00 = p;
const float3 p01 = p + ex;
const float3 p10 = p + ey;
const float3 p11 = p + ex + ey;
const float area = length(cross(ex, ey));
optix::Aabb* aabb = reinterpret_cast<optix::Aabb*>(result);
if (area > 0.0f && !isinf(area)) {
aabb->m_min = fminf (fminf (p00, p01), fminf (p10, p11));
aabb->m_max = fmaxf (fmaxf (p00, p01), fmaxf (p10, p11));
} else {
aabb->invalidate();
}
}
#else //#if (OPTIX_VERSION < 70000)
#include "wrapper.h"
#include "rend_lib.h"
#include "render_params.h"
extern "C" __device__
void __direct_callable__quad_shaderglobals (const unsigned int idx,
const float t_hit,
const float3 ray_origin,
const float3 ray_direction,
ShaderGlobals *sg)
{
const GenericData *g_data = reinterpret_cast<const GenericData *>(optixGetSbtDataPointer());
const QuadParams *g_quads = reinterpret_cast<const QuadParams *>(g_data->data);
const QuadParams &quad = g_quads[idx];
const float3 P = ray_origin + t_hit * ray_direction;
float3 h = P - quad.p;
sg->N = sg->Ng = quad.n;
sg->u = dot (h, quad.ex) * quad.eu;
sg->v = dot (h, quad.ey) * quad.ev;
sg->dPdu = quad.ey;
sg->dPdv = quad.ex;
sg->surfacearea = quad.a;
sg->shaderID = quad.shaderID;
}
extern "C" __global__
void __intersection__quad ()
{
const GenericData *g_data = reinterpret_cast<const GenericData *>(optixGetSbtDataPointer());
const QuadParams *g_quads = reinterpret_cast<const QuadParams *>(g_data->data);
const unsigned int idx = optixGetPrimitiveIndex();
const QuadParams &quad = g_quads[idx];
const float3 ray_origin = optixGetObjectRayOrigin();
const float3 ray_direction = optixGetObjectRayDirection();
float dn = dot(ray_direction, quad.n);
float en = dot(quad.p - ray_origin, quad.n);
if (dn * en > 0) {
float t = en / dn;
float3 h = (ray_origin + ray_direction * t) - quad.p;
float dx = dot(h, quad.ex) * quad.eu;
float dy = dot(h, quad.ey) * quad.ev;
if (dx >= 0 && dx < 1.0f && dy >= 0 && dy < 1.0f && t < optixGetRayTmax())
optixReportIntersection (t, RAYTRACER_HIT_QUAD);
}
}
#endif //#if (OPTIX_VERSION < 70000)
|
2b31aa519f9dfa8f45ca87896ad111873bb1a644.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/sparse_to_dense_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/GpuAtomics.cuh"
namespace caffe2 {
template <typename TInd, typename TData>
__global__ void SparseToDenseKernel(
size_t N, int64_t block_nitems, const TInd* indices, const TData* vals, TData* dst) {
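// Scatter-add: flat element i belongs to input row i/block_nitems, and lands
// at offset i%block_nitems inside output row indices[i/block_nitems]; the
// atomic add makes duplicate indices accumulate instead of racing.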
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = indices[i / block_nitems];
int dst_idx = block_nitems * idx + i % block_nitems;
gpu_atomic_add(&dst[dst_idx], vals[i]);
}
}
template <>
bool SparseToDenseOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t>>::call(
this, Input(INDICES));
}
template <>
template <typename TInd>
bool SparseToDenseOp<CUDAContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<
float,
int32_t>,
TInd>::call(this, Input(VALUES));
}
template <>
template <typename TInd, typename TData>
bool SparseToDenseOp<CUDAContext>::DoRunWithType2() {
auto& sparse_indices = Input(INDICES);
CAFFE_ENFORCE_EQ(sparse_indices.dim(), 1);
auto& sparse_values = Input(VALUES);
CAFFE_ENFORCE_GE(sparse_values.dim(), 1);
CAFFE_ENFORCE_EQ(sparse_indices.numel(), sparse_values.dim(0));
const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();
const int32_t sparse_indices_len = sparse_indices.dim32(0);
const int output_first_dim =
GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);
auto shape = sparse_values.sizes().vec();
shape[0] = output_first_dim;
auto* output = Output(0, shape, at::dtype<TData>());
TData* output_data = output->template mutable_data<TData>();
math::Set<TData>(output->numel(), TData(0), output_data, &context_);
const auto block_nitems = sparse_values.size_from_dim(1);
const TData* sparse_values_vec = sparse_values.template data<TData>();
size_t N = block_nitems * sparse_indices_len;
CAFFE_ENFORCE_EQ(output->numel(), output_first_dim * block_nitems);
hipLaunchKernelGGL(( SparseToDenseKernel<TInd, TData>),
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
N,
block_nitems,
sparse_indices_vec,
sparse_values_vec,
output_data
);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(SparseToDense, SparseToDenseOp<CUDAContext>);
} // namespace caffe2
| 2b31aa519f9dfa8f45ca87896ad111873bb1a644.cu | #include "caffe2/operators/sparse_to_dense_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/GpuAtomics.cuh"
namespace caffe2 {
template <typename TInd, typename TData>
__global__ void SparseToDenseKernel(
size_t N, int64_t block_nitems, const TInd* indices, const TData* vals, TData* dst) {
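// Scatter-add: flat element i belongs to input row i/block_nitems, and lands
// at offset i%block_nitems inside output row indices[i/block_nitems]; the
// atomic add makes duplicate indices accumulate instead of racing.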
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = indices[i / block_nitems];
int dst_idx = block_nitems * idx + i % block_nitems;
gpu_atomic_add(&dst[dst_idx], vals[i]);
}
}
template <>
bool SparseToDenseOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t>>::call(
this, Input(INDICES));
}
template <>
template <typename TInd>
bool SparseToDenseOp<CUDAContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<
float,
int32_t>,
TInd>::call(this, Input(VALUES));
}
template <>
template <typename TInd, typename TData>
bool SparseToDenseOp<CUDAContext>::DoRunWithType2() {
auto& sparse_indices = Input(INDICES);
CAFFE_ENFORCE_EQ(sparse_indices.dim(), 1);
auto& sparse_values = Input(VALUES);
CAFFE_ENFORCE_GE(sparse_values.dim(), 1);
CAFFE_ENFORCE_EQ(sparse_indices.numel(), sparse_values.dim(0));
const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();
const int32_t sparse_indices_len = sparse_indices.dim32(0);
const int output_first_dim =
GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);
auto shape = sparse_values.sizes().vec();
shape[0] = output_first_dim;
auto* output = Output(0, shape, at::dtype<TData>());
TData* output_data = output->template mutable_data<TData>();
math::Set<TData>(output->numel(), TData(0), output_data, &context_);
const auto block_nitems = sparse_values.size_from_dim(1);
const TData* sparse_values_vec = sparse_values.template data<TData>();
size_t N = block_nitems * sparse_indices_len;
CAFFE_ENFORCE_EQ(output->numel(), output_first_dim * block_nitems);
SparseToDenseKernel<TInd, TData><<<
CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
N,
block_nitems,
sparse_indices_vec,
sparse_values_vec,
output_data
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(SparseToDense, SparseToDenseOp<CUDAContext>);
} // namespace caffe2
|
1be2b3739b5171e70d141fe05f986dd8b34ee4d3.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* \file dnn/src/cuda/convolution/chanwise/bwd_small.cu
*
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* This file has been modified by Megvii ("Megvii Modifications").
* All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
*
* --------------------------------------------------------------------------
*/
#include "./kern.cuh"
#include "kern_helper_hip.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
#include "src/cuda/convolution/chanwise/launch_config.cuh"
#include "src/cuda/fp16_help.cuh"
using namespace megdnn;
using namespace cuda;
using namespace convolution;
using namespace chanwise;
namespace {
enum DepthwiseConv2dDirection { DIRECTION_FORWARD, DIRECTION_BACKWARD };
// CUDA kernel to compute the depthwise convolution forward pass in NCHW format,
// tailored for small images up to 32x32. Stride and depth multiplier must be 1.
// Padding must be 'SAME', which allows to reuse the index computation. Only
// use this kernel if CanLaunchDepthwiseConv2dGPUSmall(args) returns true.
// Tiles of the input and filter tensors are loaded into shared memory before
// performing the convolution. Each thread handles two elements per iteration,
// one each in the lower and upper half of a tile.
// Backprop input direction is the same as forward direction with the filter
// rotated by 180°.
template <typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
__global__ void
#if __CUDA_ARCH__ >= 750
__launch_bounds__(1024, 1)
#else
__launch_bounds__(1024, 2)
#endif
DepthwiseConv2dGPUKernelNCHWSmall(const Param param, const T* input,
const T* filter, T* output) {
// Holds block plus halo and filter data for blockDim.z depths.
extern __shared__ __align__(8) unsigned char shared_memory[];
static_assert(sizeof(T) <= 8, "Insufficient alignment detected");
T* const shared_data = reinterpret_cast<T*>(shared_memory);
const int num_batches = static_cast<int>(param.batch);
const int in_height = static_cast<int>(param.src_h);
const int in_width = static_cast<int>(param.src_w);
const int in_depth = static_cast<int>(param.src_chl);
const int filter_height = kKnownFilterHeight < 0
? static_cast<int>(param.flt_h)
: kKnownFilterHeight;
const int filter_width = kKnownFilterWidth < 0
? static_cast<int>(param.flt_w)
: kKnownFilterWidth;
const int pad_height = static_cast<int>(param.pad_h);
const int pad_width = static_cast<int>(param.pad_w);
// Fixed blockDim.z, tailored for maximum grid size for images of size
// 16x16. assert(blockDim.x == param.src_w); assert(blockDim.z ==
// kBlockDepth);
const int block_height = blockDim.y;
// These values are the same for all threads and could
// be precomputed on the CPU.
const int block_pixels = in_width * block_height;
const int block_size = block_pixels * kBlockDepth;
const int in_pixels = in_width * in_height;
const int in_increment = in_width - 1;
const int filter_pixels = filter_height * filter_width;
const int tile_width = in_width + filter_width - 1;
const int even_height = kKnownEvenHeight || (1 & ~in_height);
const int tile_height = in_height + filter_height - even_height;
const int tile_pixels = tile_width * tile_height;
const int tile_size = tile_pixels * kBlockDepth;
const int tile_offset = block_height * tile_width;
const int pad_offset = pad_height * tile_width + pad_width;
const int in_total_depth = in_depth * num_batches;
const int in_blocks = (in_total_depth + kBlockDepth - 1) / kBlockDepth;
const int thread_col = threadIdx.x;
const int thread_row = threadIdx.y;
const int thread_depth = threadIdx.z;
// Position in block.
const int thread_pix = thread_row * in_width + thread_col;
const int thread_idx = thread_depth * block_pixels + thread_pix;
// Initialize tile, in particular the padding.
for (int i = thread_idx; i < tile_size; i += block_size) {
shared_data[i] = T();
}
__syncthreads();
// Position in tensors.
const int tensor_idx = thread_depth * in_pixels + thread_pix;
// Position in (padded) shared memory.
const int data_pix = thread_row * tile_width + thread_col;
const int data_idx = thread_depth * tile_pixels + data_pix;
// Position in shared memory, offset by pad_height / pad_width.
const int tile_idx = data_idx + pad_offset;
// Filter is always in HWCK format, irrespective of the input/output format.
const int filter_pix = thread_idx / kBlockDepth;
const int filter_channel = thread_idx % kBlockDepth;
const int max_channel = in_total_depth - thread_depth;
const int filter_write_offset =
filter_pix < filter_pixels ? tile_size + thread_idx : 0;
const int filter_read_offset =
tile_size + thread_depth +
(kDirection == DIRECTION_FORWARD ? 0 : filter_pixels * kBlockDepth);
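// In the backward direction the filter pointer starts one past the end of the
// shared-memory filter and is decremented in the inner loop, reading the taps
// in reverse order; this realizes the 180° filter rotation mentioned above
// without materializing a rotated copy.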
const bool skip_second =
!kKnownEvenHeight && thread_row + (in_height & 1) == block_height;
for (int b = blockIdx.x; b < in_blocks; b += gridDim.x) {
const int channel = b * kBlockDepth;
const int inout_offset = channel * in_pixels + tensor_idx;
const bool channel_in_range = channel < max_channel;
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
T* const tile_ptr = tile_idx + shared_data;
tile_ptr[0] = *in_ptr;
if (!skip_second) {
tile_ptr[tile_offset] = *(block_pixels + in_ptr);
}
}
if (filter_write_offset != 0) {
const int filter_offset =
(channel + filter_channel) % in_depth * filter_pixels +
filter_pix;
shared_data[filter_write_offset] = *(filter_offset + filter);
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
if (channel_in_range) {
T2 sum = {0.0, 0.0};
int shared_offset = data_idx;
const T* filter_ptr = filter_read_offset + shared_data;
#pragma unroll
for (int r = 0; r < filter_height; ++r) {
#pragma unroll
for (int c = 0; c < filter_width; ++c) {
if (kDirection == DIRECTION_BACKWARD) {
filter_ptr -= kBlockDepth;
}
const T2 filter_value = {*filter_ptr, *filter_ptr};
const T* const tile_ptr = shared_offset + shared_data;
const T2 tile_value = {tile_ptr[0], tile_ptr[tile_offset]};
sum = fma2(filter_value, tile_value, sum);
++shared_offset;
if (kDirection == DIRECTION_FORWARD) {
filter_ptr += kBlockDepth;
}
}
shared_offset += in_increment;
}
T* const out_ptr = inout_offset + output;
out_ptr[0] = static_cast<T>(sum.x);
if (!skip_second) {
out_ptr[block_pixels] = static_cast<T>(sum.y);
}
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
}
}
template <typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
void LaunchDepthwiseConv2dGPUSmall(const Param& param, const T* input,
const T* filter, T* output,
hipStream_t stream) {
const int block_height = (param.src_h + 1) / 2;
dim3 block_dim;
int block_count;
void (*kernel)(const Param, const T*, const T*, T*);
block_dim = dim3(param.src_w, block_height, kBlockDepth);
block_count =
DIVUP(param.batch * param.src_chl * param.chl_mul, kBlockDepth) *
kBlockDepth;
kernel = DepthwiseConv2dGPUKernelNCHWSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight,
kBlockDepth, kKnownEvenHeight>;
const int tile_width = param.src_w + param.flt_w - 1;
const int tile_height = block_height * 2 + param.flt_h - 1;
const int tile_pixels = tile_height * tile_width;
const int filter_pixels = param.flt_h * param.flt_w;
const int shared_memory_size =
kBlockDepth * (tile_pixels + filter_pixels) * sizeof(T);
const int num_outputs = param.out_h * param.out_w * block_count;
block_count = GetFixedBlockSize(num_outputs, kernel, shared_memory_size,
block_dim.x * block_dim.y * block_dim.z);
hipLaunchKernelGGL(( kernel), dim3(block_count), dim3(block_dim), shared_memory_size, stream,
param, input, filter, output);
after_kernel_launch();
}
template <typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth>
void LaunchDepthwiseConv2dGPUSmall(const Param& param, const T* input,
const T* filter, T* output,
hipStream_t stream) {
if (param.src_h & 1) {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight,
kBlockDepth, false>(param, input, filter, output, stream);
} else {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight,
kBlockDepth, true>(param, input, filter, output, stream);
}
}
template <typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight>
void LaunchDepthwiseConv2dGPUSmall(const Param& param, const T* input,
const T* filter, T* output,
hipStream_t stream) {
// Maximize (power of two) kBlockDepth while keeping a block within 1024
// threads (2 pixels per thread).
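// For images capped at 32x32, block_pixels = ceil(src_h/2)*src_w <= 512, so
// each branch keeps block_pixels * kBlockDepth <= 1024 threads:
// 512*2 = 256*4 = 128*8 = 1024.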
const int block_pixels = (param.src_h + 1) / 2 * param.src_w;
if (block_pixels > 256) {
LaunchDepthwiseConv2dGPUSmall<T, T2, kDirection, kKnownFilterWidth,
kKnownFilterHeight, 2>(
param, input, filter, output, stream);
} else if (block_pixels > 128) {
LaunchDepthwiseConv2dGPUSmall<T, T2, kDirection, kKnownFilterWidth,
kKnownFilterHeight, 4>(
param, input, filter, output, stream);
} else {
LaunchDepthwiseConv2dGPUSmall<T, T2, kDirection, kKnownFilterWidth,
kKnownFilterHeight, 8>(
param, input, filter, output, stream);
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace convolution {
namespace chanwise {
// ===================================bwd data==================================
#define LAUNCH(type, type2) \
if (param.flt_h == 3 && param.flt_w == 3) { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_BACKWARD, 3, \
3>(param, dst_grad, flt, src_grad, stream); \
} else { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_BACKWARD, -1, \
-1>(param, dst_grad, flt, src_grad, stream); \
}
template <>
void run_bwd_data_small(float* src_grad, const float* dst_grad,
const float* flt, const Param& param,
hipStream_t stream) {
LAUNCH(float, float2);
}
#if TORCH_HIP_VERSION >= 9000
template <>
void run_bwd_data_small(__half* src_grad, const __half* dst_grad,
const __half* flt, const Param& param,
hipStream_t stream) {
LAUNCH(__half, __half2);
}
#endif
#undef LAUNCH
} // namespace chanwise
} // namespace convolution
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
| 1be2b3739b5171e70d141fe05f986dd8b34ee4d3.cu | /**
* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* \file dnn/src/cuda/convolution/chanwise/bwd_small.cu
*
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* This file has been modified by Megvii ("Megvii Modifications").
* All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
*
* --------------------------------------------------------------------------
*/
#include "./kern.cuh"
#include "./kern_helper.cuh"
#include "cuda.h"
#include "cuda_fp16.h"
#include "src/cuda/convolution/chanwise/launch_config.cuh"
#include "src/cuda/fp16_help.cuh"
using namespace megdnn;
using namespace cuda;
using namespace convolution;
using namespace chanwise;
namespace {
enum DepthwiseConv2dDirection { DIRECTION_FORWARD, DIRECTION_BACKWARD };
// CUDA kernel to compute the depthwise convolution forward pass in NCHW format,
// tailored for small images up to 32x32. Stride and depth multiplier must be 1.
// Padding must be 'SAME', which allows to reuse the index computation. Only
// use this kernel if CanLaunchDepthwiseConv2dGPUSmall(args) returns true.
// Tiles of the input and filter tensors are loaded into shared memory before
// performing the convolution. Each thread handles two elements per iteration,
// one each in the lower and upper half of a tile.
// Backprop input direction is the same as forward direction with the filter
// rotated by 180°.
template <typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
__global__ void
#if __CUDA_ARCH__ >= 750
__launch_bounds__(1024, 1)
#else
__launch_bounds__(1024, 2)
#endif
DepthwiseConv2dGPUKernelNCHWSmall(const Param param, const T* input,
const T* filter, T* output) {
// Holds block plus halo and filter data for blockDim.z depths.
extern __shared__ __align__(8) unsigned char shared_memory[];
static_assert(sizeof(T) <= 8, "Insufficient alignment detected");
T* const shared_data = reinterpret_cast<T*>(shared_memory);
const int num_batches = static_cast<int>(param.batch);
const int in_height = static_cast<int>(param.src_h);
const int in_width = static_cast<int>(param.src_w);
const int in_depth = static_cast<int>(param.src_chl);
const int filter_height = kKnownFilterHeight < 0
? static_cast<int>(param.flt_h)
: kKnownFilterHeight;
const int filter_width = kKnownFilterWidth < 0
? static_cast<int>(param.flt_w)
: kKnownFilterWidth;
const int pad_height = static_cast<int>(param.pad_h);
const int pad_width = static_cast<int>(param.pad_w);
// Fixed blockDim.z, tailored for maximum grid size for images of size
// 16x16. assert(blockDim.x == param.src_w); assert(blockDim.z ==
// kBlockDepth);
const int block_height = blockDim.y;
// These values are the same for all threads and could
// be precomputed on the CPU.
const int block_pixels = in_width * block_height;
const int block_size = block_pixels * kBlockDepth;
const int in_pixels = in_width * in_height;
const int in_increment = in_width - 1;
const int filter_pixels = filter_height * filter_width;
const int tile_width = in_width + filter_width - 1;
const int even_height = kKnownEvenHeight || (1 & ~in_height);
const int tile_height = in_height + filter_height - even_height;
const int tile_pixels = tile_width * tile_height;
const int tile_size = tile_pixels * kBlockDepth;
const int tile_offset = block_height * tile_width;
const int pad_offset = pad_height * tile_width + pad_width;
const int in_total_depth = in_depth * num_batches;
const int in_blocks = (in_total_depth + kBlockDepth - 1) / kBlockDepth;
const int thread_col = threadIdx.x;
const int thread_row = threadIdx.y;
const int thread_depth = threadIdx.z;
// Position in block.
const int thread_pix = thread_row * in_width + thread_col;
const int thread_idx = thread_depth * block_pixels + thread_pix;
// Initialize tile, in particular the padding.
for (int i = thread_idx; i < tile_size; i += block_size) {
shared_data[i] = T();
}
__syncthreads();
// Position in tensors.
const int tensor_idx = thread_depth * in_pixels + thread_pix;
// Position in (padded) shared memory.
const int data_pix = thread_row * tile_width + thread_col;
const int data_idx = thread_depth * tile_pixels + data_pix;
// Position in shared memory, offset by pad_height / pad_width.
const int tile_idx = data_idx + pad_offset;
// Filter is always in HWCK format, irrespective of the input/output format.
const int filter_pix = thread_idx / kBlockDepth;
const int filter_channel = thread_idx % kBlockDepth;
const int max_channel = in_total_depth - thread_depth;
const int filter_write_offset =
filter_pix < filter_pixels ? tile_size + thread_idx : 0;
const int filter_read_offset =
tile_size + thread_depth +
(kDirection == DIRECTION_FORWARD ? 0 : filter_pixels * kBlockDepth);
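// In the backward direction the filter pointer starts one past the end of the
// shared-memory filter and is decremented in the inner loop, reading the taps
// in reverse order; this realizes the 180° filter rotation mentioned above
// without materializing a rotated copy.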
const bool skip_second =
!kKnownEvenHeight && thread_row + (in_height & 1) == block_height;
for (int b = blockIdx.x; b < in_blocks; b += gridDim.x) {
const int channel = b * kBlockDepth;
const int inout_offset = channel * in_pixels + tensor_idx;
const bool channel_in_range = channel < max_channel;
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
T* const tile_ptr = tile_idx + shared_data;
tile_ptr[0] = *in_ptr;
if (!skip_second) {
tile_ptr[tile_offset] = *(block_pixels + in_ptr);
}
}
if (filter_write_offset != 0) {
const int filter_offset =
(channel + filter_channel) % in_depth * filter_pixels +
filter_pix;
shared_data[filter_write_offset] = *(filter_offset + filter);
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
if (channel_in_range) {
T2 sum = {0.0, 0.0};
int shared_offset = data_idx;
const T* filter_ptr = filter_read_offset + shared_data;
#pragma unroll
for (int r = 0; r < filter_height; ++r) {
#pragma unroll
for (int c = 0; c < filter_width; ++c) {
if (kDirection == DIRECTION_BACKWARD) {
filter_ptr -= kBlockDepth;
}
const T2 filter_value = {*filter_ptr, *filter_ptr};
const T* const tile_ptr = shared_offset + shared_data;
const T2 tile_value = {tile_ptr[0], tile_ptr[tile_offset]};
sum = fma2(filter_value, tile_value, sum);
++shared_offset;
if (kDirection == DIRECTION_FORWARD) {
filter_ptr += kBlockDepth;
}
}
shared_offset += in_increment;
}
T* const out_ptr = inout_offset + output;
out_ptr[0] = static_cast<T>(sum.x);
if (!skip_second) {
out_ptr[block_pixels] = static_cast<T>(sum.y);
}
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
}
}
template <typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
void LaunchDepthwiseConv2dGPUSmall(const Param& param, const T* input,
const T* filter, T* output,
cudaStream_t stream) {
const int block_height = (param.src_h + 1) / 2;
dim3 block_dim;
int block_count;
void (*kernel)(const Param, const T*, const T*, T*);
block_dim = dim3(param.src_w, block_height, kBlockDepth);
block_count =
DIVUP(param.batch * param.src_chl * param.chl_mul, kBlockDepth) *
kBlockDepth;
kernel = DepthwiseConv2dGPUKernelNCHWSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight,
kBlockDepth, kKnownEvenHeight>;
const int tile_width = param.src_w + param.flt_w - 1;
const int tile_height = block_height * 2 + param.flt_h - 1;
const int tile_pixels = tile_height * tile_width;
const int filter_pixels = param.flt_h * param.flt_w;
const int shared_memory_size =
kBlockDepth * (tile_pixels + filter_pixels) * sizeof(T);
const int num_outputs = param.out_h * param.out_w * block_count;
block_count = GetFixedBlockSize(num_outputs, kernel, shared_memory_size,
block_dim.x * block_dim.y * block_dim.z);
kernel<<<block_count, block_dim, shared_memory_size, stream>>>(
param, input, filter, output);
after_kernel_launch();
}
template <typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth>
void LaunchDepthwiseConv2dGPUSmall(const Param& param, const T* input,
const T* filter, T* output,
cudaStream_t stream) {
if (param.src_h & 1) {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight,
kBlockDepth, false>(param, input, filter, output, stream);
} else {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight,
kBlockDepth, true>(param, input, filter, output, stream);
}
}
template <typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight>
void LaunchDepthwiseConv2dGPUSmall(const Param& param, const T* input,
const T* filter, T* output,
cudaStream_t stream) {
// Maximize (power of two) kBlockDepth while keeping a block within 1024
// threads (2 pixels per thread).
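// For images capped at 32x32, block_pixels = ceil(src_h/2)*src_w <= 512, so
// each branch keeps block_pixels * kBlockDepth <= 1024 threads:
// 512*2 = 256*4 = 128*8 = 1024.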
const int block_pixels = (param.src_h + 1) / 2 * param.src_w;
if (block_pixels > 256) {
LaunchDepthwiseConv2dGPUSmall<T, T2, kDirection, kKnownFilterWidth,
kKnownFilterHeight, 2>(
param, input, filter, output, stream);
} else if (block_pixels > 128) {
LaunchDepthwiseConv2dGPUSmall<T, T2, kDirection, kKnownFilterWidth,
kKnownFilterHeight, 4>(
param, input, filter, output, stream);
} else {
LaunchDepthwiseConv2dGPUSmall<T, T2, kDirection, kKnownFilterWidth,
kKnownFilterHeight, 8>(
param, input, filter, output, stream);
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace convolution {
namespace chanwise {
// ===================================bwd data==================================
#define LAUNCH(type, type2) \
if (param.flt_h == 3 && param.flt_w == 3) { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_BACKWARD, 3, \
3>(param, dst_grad, flt, src_grad, stream); \
} else { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_BACKWARD, -1, \
-1>(param, dst_grad, flt, src_grad, stream); \
}
template <>
void run_bwd_data_small(float* src_grad, const float* dst_grad,
const float* flt, const Param& param,
cudaStream_t stream) {
LAUNCH(float, float2);
}
#if CUDA_VERSION >= 9000
template <>
void run_bwd_data_small(__half* src_grad, const __half* dst_grad,
const __half* flt, const Param& param,
cudaStream_t stream) {
LAUNCH(__half, __half2);
}
#endif
#undef LAUNCH
} // namespace chanwise
} // namespace convolution
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
|
38e47ab926fb56d8624995a19569d78ffd185897.hip | // !!! This is a file automatically generated by hipify!!!
#include <bcl/bcl.hpp>
#include <bcl/backends/experimental/nvshmem/backend.hpp>
#include <bcl/containers/experimental/cuda/DuplQueue.hpp>
#include <bcl/containers/experimental/cuda/launch_kernel.cuh>
#include <chrono>
#define NUM_INSERTS 2*8*1024
int main(int argc, char** argv) {
BCL::init(16);
printf("Hello, world! I am rank %lu/%lu\n",
BCL::rank(), BCL::nprocs());
BCL::cuda::init(14*1024);
size_t num_inserts = NUM_INSERTS;
size_t insert_size = 8*1024;
// Round up so each rank has an equal number of inserts.
size_t inserts_per_rank = (num_inserts + BCL::nprocs() - 1) / BCL::nprocs();
inserts_per_rank *= BCL::nprocs();
num_inserts = inserts_per_rank * BCL::nprocs();
BCL::cuda::DuplQueue<int> queue(0, num_inserts*insert_size);
BCL::cuda::device_vector<int, BCL::cuda::bcl_allocator<int>> values(insert_size);
// BCL::cuda::device_vector<int> values(insert_size);
std::vector<int> values_local(insert_size, BCL::rank());
values.assign(values_local.begin(), values_local.end());
BCL::cuda::barrier();
auto begin = std::chrono::high_resolution_clock::now();
BCL::cuda::launch(inserts_per_rank*32,
[] __device__ (size_t idx, BCL::cuda::DuplQueue<int>& queue,
BCL::cuda::device_vector<int, BCL::cuda::bcl_allocator<int>>& values) {
// BCL::cuda::device_vector<int>& values) {
bool success = queue.push_warp(values.data(), values.size());
if (!success) {
printf("AGH! I have failed!\n");
}
}, queue, values);
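// Assumption: DuplQueue::push_warp is warp-cooperative (all 32 lanes of a warp
// push one batch together), which is why the launch requests
// inserts_per_rank*32 threads for inserts_per_rank pushes.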
hipDeviceSynchronize();
BCL::cuda::barrier();
auto end = std::chrono::high_resolution_clock::now();
double duration = std::chrono::duration<double>(end - begin).count();
double data_moved = num_inserts*insert_size*sizeof(int);
double data_moved_gb = data_moved*1e-9;
double bw = data_moved / duration;
double bw_gb = bw*1e-9;
BCL::print("Total %lf s (%lf GB) (%lf GB/s)\n", duration, data_moved_gb, bw_gb);
if (BCL::rank() == 0) {
BCL::cuda::launch(num_inserts,
[] __device__ (size_t idx, BCL::cuda::DuplQueue<int>& queue) {
int value = 12;
bool success = queue.local_pop(value);
// printf("%lu: %d (%s)\n", idx, value, (success) ? "success" : "failure");
}, queue);
hipDeviceSynchronize();
}
BCL::cuda::barrier();
BCL::print("Here...\n");
BCL::cuda::barrier();
BCL::print("After barrier...\n");
BCL::finalize();
return 0;
}
| 38e47ab926fb56d8624995a19569d78ffd185897.cu | #include <bcl/bcl.hpp>
#include <bcl/backends/experimental/nvshmem/backend.hpp>
#include <bcl/containers/experimental/cuda/DuplQueue.hpp>
#include <bcl/containers/experimental/cuda/launch_kernel.cuh>
#include <chrono>
#define NUM_INSERTS 2*8*1024
int main(int argc, char** argv) {
BCL::init(16);
printf("Hello, world! I am rank %lu/%lu\n",
BCL::rank(), BCL::nprocs());
BCL::cuda::init(14*1024);
size_t num_inserts = NUM_INSERTS;
size_t insert_size = 8*1024;
// Round up so each rank has an equal number of inserts.
size_t inserts_per_rank = (num_inserts + BCL::nprocs() - 1) / BCL::nprocs();
inserts_per_rank *= BCL::nprocs();
num_inserts = inserts_per_rank * BCL::nprocs();
BCL::cuda::DuplQueue<int> queue(0, num_inserts*insert_size);
BCL::cuda::device_vector<int, BCL::cuda::bcl_allocator<int>> values(insert_size);
// BCL::cuda::device_vector<int> values(insert_size);
std::vector<int> values_local(insert_size, BCL::rank());
values.assign(values_local.begin(), values_local.end());
BCL::cuda::barrier();
auto begin = std::chrono::high_resolution_clock::now();
BCL::cuda::launch(inserts_per_rank*32,
[] __device__ (size_t idx, BCL::cuda::DuplQueue<int>& queue,
BCL::cuda::device_vector<int, BCL::cuda::bcl_allocator<int>>& values) {
// BCL::cuda::device_vector<int>& values) {
bool success = queue.push_warp(values.data(), values.size());
if (!success) {
printf("AGH! I have failed!\n");
}
}, queue, values);
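// Assumption: DuplQueue::push_warp is warp-cooperative (all 32 lanes of a warp
// push one batch together), which is why the launch requests
// inserts_per_rank*32 threads for inserts_per_rank pushes.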
cudaDeviceSynchronize();
BCL::cuda::barrier();
auto end = std::chrono::high_resolution_clock::now();
double duration = std::chrono::duration<double>(end - begin).count();
double data_moved = num_inserts*insert_size*sizeof(int);
double data_moved_gb = data_moved*1e-9;
double bw = data_moved / duration;
double bw_gb = bw*1e-9;
BCL::print("Total %lf s (%lf GB) (%lf GB/s)\n", duration, data_moved_gb, bw_gb);
if (BCL::rank() == 0) {
BCL::cuda::launch(num_inserts,
[] __device__ (size_t idx, BCL::cuda::DuplQueue<int>& queue) {
int value = 12;
bool success = queue.local_pop(value);
// printf("%lu: %d (%s)\n", idx, value, (success) ? "success" : "failure");
}, queue);
cudaDeviceSynchronize();
}
BCL::cuda::barrier();
BCL::print("Here...\n");
BCL::cuda::barrier();
BCL::print("After barrier...\n");
BCL::finalize();
return 0;
}
|
36d22c410ead225912d288d53c5bc8930d81ac39.hip | // !!! This is a file automatically generated by hipify!!!
// by Olaf Hall-Holt, 2007-2015
#include <iostream>
#include <string>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "eriolHeader.h"
#include "../homography.h"
#include "NCCDemo.h"
#include "time.h"
Tile &loadJustOneTile(const string &tileID, const string &imgName);
vector<PixelLoc> getPixelsFor(int);
//Global variables
string tile;
string image;
string imageR;
string imageL;
double * best = new double[9];
float bestncc = -2;
float first;
double scale = 1;
bool initial = true;
thrust::host_vector<PixelLoc> interiorR;
thrust::host_vector<PixelLoc> interiorL;
thrust::host_vector<PixelLoc> interior;
Image myimg;
Image myimgOther;
// Missing in the original: runNCC uses these at file scope (main only has them
// commented out), so declare them here; host_vector matches the other globals.
thrust::host_vector<Color> intcolors;
thrust::host_vector<Color> intcolors2;
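// runNCC performs one step of a random-restart hill climb: score the current
// homography by NCC over the tile interior, keep it if it beats the best so
// far (resetting the stagnation counter j), then perturb the best by a random
// step of size scale/k for the next trial.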
void runNCC(double * current, int &j, int k){ // j by reference so the reset below takes effect
//main variables
double point[2];
thrust::device_vector<Color> d_intcolors = intcolors;
thrust::device_vector<Color> d_intcolors2 = intcolors2;
float ncc;
for(unsigned int i=0; i<interior.size(); ++i){
homography(interior[i].x + 0.5 , interior[i].y + 0.5, current, point);
Coord mycoord(point[0], point[1]);
if (point[0] < myimg.getWidth() && point[1] < myimg.getHeight()){
intcolors.push_back(asInterpolatedColor(mycoord, &myimg));
} else {
continue;
}
intcolors2.push_back(myimgOther.getPixel(interior[i]));
}
ncc = calculate_normalized_correlation(intcolors, intcolors2);
if (initial){
first = ncc;
initial = false;
cout << "Initial: " << first << endl;
}
if (ncc > bestncc){
bestncc = ncc;
for(int i=0;i<9;++i){
best[i] = current[i];
}
j=0;
}
long seed = (long)time(NULL) * j;
//cout << seed << endl;
randHomography(best, current, seed, scale/k);
intcolors.clear();
intcolors2.clear();
}
int main(int argc, char **argv)
{
//main variables
// double point[2];
// /*thrust::host_*/vector<Color> intcolors;
// /*thrust::host_*/vector<Color> intcolors2;
double * current = new double[9];
// float ncc;
Matrix3x3 myH1;
// //GPU variables
// double * d_point;
// float d_ncc; //in function
// Matrix3x3 d_myH1;
tile = argv[1];
image = argv[2];
imageR = image+"R";
imageL = image+"L";
interiorR = getContour(tile, imageR);
interiorL = getContour(tile, imageL);
best = new double[9];
if (interiorR.size() > interiorL.size())
{
interior = getContour(tile, imageL);
myH1 = getHomography(tile, imageL, imageR);
myimg = imageR.c_str();
myimgOther = imageL.c_str();
}
else
{
interior = getContour(tile, imageR);
myH1 = getHomography(tile, imageR, imageL);
myimg = imageL.c_str();
myimgOther = imageR.c_str();
}
cerr << "homography: " << myH1 << endl;
for(int i=0;i<9;++i){
current[i] = myH1.m[i];
}
// //CUDA allocation
// hipMalloc(d_point, 2*sizeof(double));
// hipMalloc(d_current, 9*sizeof(double));
// //CUDA copying
for(int k=10; k < 100000; k*=10){
cout << "Trying with scale = " << scale/k << endl;
// float d_bestncc = -2; // unused GPU stubs, left commented like the others above
// double * d_best;
// NOTE: the inner iteration count below is an assumption; the original loop bound was lost.
for(int j=1; j<=1000; ++j){
runNCC(current, j, k);
}
cout << "Best so far: " << bestncc << endl;
cout << "homography: ";
for(int i=0;i<9;++i){
cout << best[i] << " ";
}
cout << endl;
}
cout << "First: " << first << " Best: " << bestncc << endl;
cout << "homography: ";
for(int i=0;i<9;++i){
cout << best[i] << " ";
}
cout << endl;
}
| 36d22c410ead225912d288d53c5bc8930d81ac39.cu | // by Olaf Hall-Holt, 2007-2015
#include <iostream>
#include <string>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "eriolHeader.h"
#include "../homography.h"
#include "NCCDemo.h"
#include "time.h"
Tile &loadJustOneTile(const string &tileID, const string &imgName);
vector<PixelLoc> getPixelsFor(int);
//Global variables
string tile;
string image;
string imageR;
string imageL;
double * best = new double[9];
float bestncc = -2;
float first;
double scale = 1;
bool initial = true;
thrust::host_vector<PixelLoc> interiorR;
thrust::host_vector<PixelLoc> interiorL;
thrust::host_vector<PixelLoc> interior;
Image myimg;
Image myimgOther;
// Missing in the original: runNCC uses these at file scope (main only has them
// commented out), so declare them here; host_vector matches the other globals.
thrust::host_vector<Color> intcolors;
thrust::host_vector<Color> intcolors2;
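// runNCC performs one step of a random-restart hill climb: score the current
// homography by NCC over the tile interior, keep it if it beats the best so
// far (resetting the stagnation counter j), then perturb the best by a random
// step of size scale/k for the next trial.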
void runNCC(double * current, int &j, int k){ // j by reference so the reset below takes effect
//main variables
double point[2];
thrust::device_vector<Color> d_intcolors = intcolors;
thrust::device_vector<Color> d_intcolors2 = intcolors2;
float ncc;
for(unsigned int i=0; i<interior.size(); ++i){
homography(interior[i].x + 0.5 , interior[i].y + 0.5, current, point);
Coord mycoord(point[0], point[1]);
if (point[0] < myimg.getWidth() && point[1] < myimg.getHeight()){
intcolors.push_back(asInterpolatedColor(mycoord, &myimg));
} else {
continue;
}
intcolors2.push_back(myimgOther.getPixel(interior[i]));
}
ncc = calculate_normalized_correlation(intcolors, intcolors2);
if (initial){
first = ncc;
initial = false;
cout << "Initial: " << first << endl;
}
if (ncc > bestncc){
bestncc = ncc;
for(int i=0;i<9;++i){
best[i] = current[i];
}
j=0;
}
long seed = (long)time(NULL) * j;
//cout << seed << endl;
randHomography(best, current, seed, scale/k);
intcolors.clear();
intcolors2.clear();
}
int main(int argc, char **argv)
{
//main variables
// double point[2];
// /*thrust::host_*/vector<Color> intcolors;
// /*thrust::host_*/vector<Color> intcolors2;
double * current = new double[9];
// float ncc;
Matrix3x3 myH1;
// //GPU variables
// double * d_point;
// float d_ncc; //in function
// Matrix3x3 d_myH1;
tile = argv[1];
image = argv[2];
imageR = image+"R";
imageL = image+"L";
interiorR = getContour(tile, imageR);
interiorL = getContour(tile, imageL);
best = new double[9];
if (interiorR.size() > interiorL.size())
{
interior = getContour(tile, imageL);
myH1 = getHomography(tile, imageL, imageR);
myimg = imageR.c_str();
myimgOther = imageL.c_str();
}
else
{
interior = getContour(tile, imageR);
myH1 = getHomography(tile, imageR, imageL);
myimg = imageL.c_str();
myimgOther = imageR.c_str();
}
cerr << "homography: " << myH1 << endl;
for(int i=0;i<9;++i){
current[i] = myH1.m[i];
}
// //CUDA allocation
// cudaMalloc(d_point, 2*sizeof(double));
// cudaMalloc(d_current, 9*sizeof(double));
// //CUDA copying
for(int k=10; k < 100000; k*=10){
cout << "Trying with scale = " << scale/k << endl;
// float d_bestncc = -2; // unused GPU stubs, left commented like the others above
// double * d_best;
// NOTE: the inner iteration count below is an assumption; the original loop bound was lost.
for(int j=1; j<=1000; ++j){
runNCC(current, j, k);
}
cout << "Best so far: " << bestncc << endl;
cout << "homography: ";
for(int i=0;i<9;++i){
cout << best[i] << " ";
}
cout << endl;
}
cout << "First: " << first << " Best: " << bestncc << endl;
cout << "homography: ";
for(int i=0;i<9;++i){
cout << best[i] << " ";
}
cout << endl;
}
|
0cc83a335f396f4117a2082efff26e975e722bea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "helper_cuda.h"
#include "src/window/window_context.h"
#include "src/controls/observer.h"
#include "src/rendering/coloured_mesh.h"
#include "src/rendering/simple_mesh.h"
#include "src/rendering/shader.h"
#include "src/rendering/shader_sources/ColouredShaderSources.h"
#include "src/rendering/shader_sources/SimpleShaderSources.h"
#include "src/rendering/grid_factory/CreateGridData.h"
#include "src/utilities/Matrix_3D.h"
#include <stdio.h>
#include <cuda_gl_interop.h> // this has to be included after some other headers, not sure which ones, haven't tried all possibilities so I put this to the end, but at first this caused a compile error!!!
#include "GlobalVariables.h"
//#include "src/cuda_kernels/HeatEquationKernels.cuh"
#include "src/cuda_kernels/WaveEquationKernels.cuh"
void mouse_scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
{
Observer* obsPtr = (Observer*)glfwGetWindowUserPointer(window);
if (yoffset > 0) { obsPtr->ZoomIn(1.1f); } // PARAMETER zoom multiplier
else if (yoffset < 0) { obsPtr->ZoomOut(1.1f); }
}
void SetTimeSpeed(MyWindow& appWindow, float& timeSpeed)
{
if (appWindow.IsKeyPressed(GLFW_KEY_SPACE)) { timeSpeed = 0.0f; }
if (appWindow.IsKeyPressed(GLFW_KEY_1)) { timeSpeed = 0.1f; }
if (appWindow.IsKeyPressed(GLFW_KEY_2)) { timeSpeed = 1.0f; }
if (appWindow.IsKeyPressed(GLFW_KEY_3)) { timeSpeed = 2.0f; }
if (appWindow.IsKeyPressed(GLFW_KEY_4)) { timeSpeed = 8.0f; }
}
int main()
{
uint32_t gridSize = 100;
uint32_t gridElements = gridSize * gridSize;
float amplitude = 0.2f * (float)100;
std::vector<float> g_WaveEquationInitialCondition_100x100;
g_WaveEquationInitialCondition_100x100.resize(100 * 100);
// for (int i = 0; i < 100; i++) { g_WaveEquationInitialCondition_100x100[5000+i] = 1.0f; }
g_WaveEquationInitialCondition_100x100[5050] = 0.001f;
std::vector<float> granulatedGrid = GridFactory::MapAmplitudeFields(gridSize, g_WaveEquationInitialCondition_100x100);
std::vector<float> flatGrid = GridFactory::MapAmplitudeFields(gridSize, g_FlatField_1x1);
MyWindow appWindow(g_WindowWidth, g_WindowHeight, "WaveEquation"); std::cout << glfwGetError(NULL) << "\n";
glfwSetWindowPos(appWindow.GetWindow(), 100, 200); std::cout << glfwGetError(NULL) << "\n";
appWindow.SetMouseScrollCallback(mouse_scroll_callback); std::cout << glfwGetError(NULL) << "\n";
Observer observer;
observer.translation = Vec3D(0.5f * (float)gridSize, 10.0f, -20.0f);// observer.TurnDown(0.5f);
appWindow.SetUserPointer(&observer);
// Create the grids
//std::vector<Vec3D> vertexData = GridFactory::CreateGridVertexData(gridSize, amplitude);
std::vector<Vec3D> vertexData = GridFactory::CreateGridVertexData_with_amplitudes(gridSize, amplitude, granulatedGrid);
std::vector<Vec3D> flatVertexData = GridFactory::CreateGridVertexData_with_amplitudes(gridSize, amplitude, flatGrid);
std::vector<uint32_t> indexData = GridFactory::CreateGridIndexData(gridSize);
SimpleMesh GridMesh_1(flatVertexData, indexData);
SimpleMesh GridMesh_2(vertexData, indexData);
SimpleMesh GridMesh_3(vertexData, indexData);
Shader simpleShader(VertexShader_Simple, FragmentShader_Simple);
{
simpleShader.Bind();
simpleShader.UploadUniformFloat3("body_translation", glm::vec3(0.0f, 0.0f, 0.0f));
simpleShader.UploadUniformMat3("body_orientation", glm::mat3(1.0f));
simpleShader.UploadUniformFloat("body_scale", 1.0f);
simpleShader.UploadUniformFloat3("observer_translation", glm::vec3(0.0f, 5.0f, -10.0f));
simpleShader.UploadUniformMat3("observer_orientation", glm::mat3(1.0f));
simpleShader.UploadUniformFloat("zoom_level", 1.0f);
simpleShader.UploadUniformFloat("aspect_ratio", (float)g_WindowWidth / (float)g_WindowHeight);
simpleShader.UploadUniformFloat("amplitude", amplitude/10.0f);
}
// Cuda functions that need to be called
// hipGraphicsGLRegisterBuffer // once after the vertex buffer has been created
// hipGraphicsMapResources // every time in the rendering loop
// hipGraphicsResourceGetMappedPointer // every time in the rendering loop
// hipGraphicsUnmapResources // every time in the rendering loop
// hipGraphicsUnregisterResource // once after the vertex buffer has been destroyed
struct cudaGraphicsResource* cuda_vbo_resource_1;
checkCudaErrors(hipGraphicsGLRegisterBuffer(&cuda_vbo_resource_1, (GLuint)GridMesh_1.m_VertexBuffer.m_RendererID, hipGraphicsMapFlagsNone));
struct cudaGraphicsResource* cuda_vbo_resource_2;
checkCudaErrors(hipGraphicsGLRegisterBuffer(&cuda_vbo_resource_2, (GLuint)GridMesh_2.m_VertexBuffer.m_RendererID, hipGraphicsMapFlagsNone));
struct cudaGraphicsResource* cuda_vbo_resource_3;
checkCudaErrors(hipGraphicsGLRegisterBuffer(&cuda_vbo_resource_3, (GLuint)GridMesh_3.m_VertexBuffer.m_RendererID, hipGraphicsMapFlagsNone));
uint32_t blockSize = 320;
float timeSpeed = 0.0f; // PARAMETER initial time speed
float timestep = 0.1f; // timestep can be initialized like this, because its constructor takes in only one float, implicit cast is possible
int counter = 0, draw_frequency = 10;
// Game loop
while (!glfwWindowShouldClose(appWindow.GetWindow()))
{
// appWindow.HandleUserInputs(observer, timestep);
// Set the speed of the simulation, note that the quality of the update will be worse, as the timestep will be bigger
SetTimeSpeed(appWindow, timeSpeed);
// observer.SetObserverInShader(simpleShader);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (timeSpeed > 0.0f)
{
// --------------------- //
// do the CUDA part here //
float3* dptr_1, * dptr_2, * dptr_3, * dptr_heatSrc;
size_t num_bytes;
checkCudaErrors(hipGraphicsMapResources(1, &cuda_vbo_resource_1, 0));
checkCudaErrors(hipGraphicsMapResources(1, &cuda_vbo_resource_2, 0));
checkCudaErrors(hipGraphicsMapResources(1, &cuda_vbo_resource_3, 0));
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&dptr_1, &num_bytes, cuda_vbo_resource_1));
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&dptr_2, &num_bytes, cuda_vbo_resource_2));
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&dptr_3, &num_bytes, cuda_vbo_resource_3));
// launch kernel here
hipLaunchKernelGGL(( WaveEquation_kernel) , dim3(gridElements / blockSize + 1), dim3(blockSize) , 0, 0, dptr_1, dptr_2, dptr_3, gridSize, timestep*timeSpeed);
hipLaunchKernelGGL(( WaveEquation_kernel) , dim3(gridElements / blockSize + 1), dim3(blockSize) , 0, 0, dptr_2, dptr_3, dptr_1, gridSize, timestep*timeSpeed);
hipLaunchKernelGGL(( WaveEquation_kernel) , dim3(gridElements / blockSize + 1), dim3(blockSize) , 0, 0, dptr_3, dptr_1, dptr_2, gridSize, timestep*timeSpeed);
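// The three meshes hold three consecutive time levels of the wave field;
// assuming the kernel writes its third argument from the first two, rotating
// (dptr_1, dptr_2, dptr_3) across the three launches advances three timesteps
// and leaves the buffer roles where they started.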
checkCudaErrors(hipGraphicsUnmapResources(1, &cuda_vbo_resource_1, 0));
checkCudaErrors(hipGraphicsUnmapResources(1, &cuda_vbo_resource_2, 0));
checkCudaErrors(hipGraphicsUnmapResources(1, &cuda_vbo_resource_3, 0));
// --------------------- //
}
if (counter > draw_frequency)
{
appWindow.HandleUserInputs(observer, timestep);
observer.SetObserverInShader(simpleShader);
GridMesh_1.Draw();
glfwSwapBuffers(appWindow.GetWindow());
counter = 0;
}
counter++;
// Swap the screen buffers
// glfwSwapBuffers(appWindow.GetWindow());
}
checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource_1));
checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource_2));
checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource_3));
// Terminates GLFW, clearing any resources allocated by GLFW.
glfwTerminate();
return 0;
}
| 0cc83a335f396f4117a2082efff26e975e722bea.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "helper_cuda.h"
#include "src/window/window_context.h"
#include "src/controls/observer.h"
#include "src/rendering/coloured_mesh.h"
#include "src/rendering/simple_mesh.h"
#include "src/rendering/shader.h"
#include "src/rendering/shader_sources/ColouredShaderSources.h"
#include "src/rendering/shader_sources/SimpleShaderSources.h"
#include "src/rendering/grid_factory/CreateGridData.h"
#include "src/utilities/Matrix_3D.h"
#include <stdio.h>
#include <cuda_gl_interop.h> // this has to be included after some other headers, not sure which ones, haven't tried all possibilities so I put this to the end, but at first this caused a compile error!!!
#include "GlobalVariables.h"
//#include "src/cuda_kernels/HeatEquationKernels.cuh"
#include "src/cuda_kernels/WaveEquationKernels.cuh"
void mouse_scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
{
Observer* obsPtr = (Observer*)glfwGetWindowUserPointer(window);
if (yoffset > 0) { obsPtr->ZoomIn(1.1f); } // PARAMETER zoom multiplier
else if (yoffset < 0) { obsPtr->ZoomOut(1.1f); }
}
void SetTimeSpeed(MyWindow& appWindow, float& timeSpeed)
{
if (appWindow.IsKeyPressed(GLFW_KEY_SPACE)) { timeSpeed = 0.0f; }
if (appWindow.IsKeyPressed(GLFW_KEY_1)) { timeSpeed = 0.1f; }
if (appWindow.IsKeyPressed(GLFW_KEY_2)) { timeSpeed = 1.0f; }
if (appWindow.IsKeyPressed(GLFW_KEY_3)) { timeSpeed = 2.0f; }
if (appWindow.IsKeyPressed(GLFW_KEY_4)) { timeSpeed = 8.0f; }
}
int main()
{
uint32_t gridSize = 100;
uint32_t gridElements = gridSize * gridSize;
float amplitude = 0.2f * (float)100;
std::vector<float> g_WaveEquationInitialCondition_100x100;
g_WaveEquationInitialCondition_100x100.resize(100 * 100);
// for (int i = 0; i < 100; i++) { g_WaveEquationInitialCondition_100x100[5000+i] = 1.0f; }
g_WaveEquationInitialCondition_100x100[5050] = 0.001f;
std::vector<float> granulatedGrid = GridFactory::MapAmplitudeFields(gridSize, g_WaveEquationInitialCondition_100x100);
std::vector<float> flatGrid = GridFactory::MapAmplitudeFields(gridSize, g_FlatField_1x1);
MyWindow appWindow(g_WindowWidth, g_WindowHeight, "WaveEquation"); std::cout << glfwGetError(NULL) << "\n";
glfwSetWindowPos(appWindow.GetWindow(), 100, 200); std::cout << glfwGetError(NULL) << "\n";
appWindow.SetMouseScrollCallback(mouse_scroll_callback); std::cout << glfwGetError(NULL) << "\n";
Observer observer;
observer.translation = Vec3D(0.5f * (float)gridSize, 10.0f, -20.0f);// observer.TurnDown(0.5f);
appWindow.SetUserPointer(&observer);
// Create the grids
//std::vector<Vec3D> vertexData = GridFactory::CreateGridVertexData(gridSize, amplitude);
std::vector<Vec3D> vertexData = GridFactory::CreateGridVertexData_with_amplitudes(gridSize, amplitude, granulatedGrid);
std::vector<Vec3D> flatVertexData = GridFactory::CreateGridVertexData_with_amplitudes(gridSize, amplitude, flatGrid);
std::vector<uint32_t> indexData = GridFactory::CreateGridIndexData(gridSize);
SimpleMesh GridMesh_1(flatVertexData, indexData);
SimpleMesh GridMesh_2(vertexData, indexData);
SimpleMesh GridMesh_3(vertexData, indexData);
Shader simpleShader(VertexShader_Simple, FragmentShader_Simple);
{
simpleShader.Bind();
simpleShader.UploadUniformFloat3("body_translation", glm::vec3(0.0f, 0.0f, 0.0f));
simpleShader.UploadUniformMat3("body_orientation", glm::mat3(1.0f));
simpleShader.UploadUniformFloat("body_scale", 1.0f);
simpleShader.UploadUniformFloat3("observer_translation", glm::vec3(0.0f, 5.0f, -10.0f));
simpleShader.UploadUniformMat3("observer_orientation", glm::mat3(1.0f));
simpleShader.UploadUniformFloat("zoom_level", 1.0f);
simpleShader.UploadUniformFloat("aspect_ratio", (float)g_WindowWidth / (float)g_WindowHeight);
simpleShader.UploadUniformFloat("amplitude", amplitude/10.0f);
}
// Cuda functions that need to be called
// cudaGraphicsGLRegisterBuffer // once after the vertex buffer has been created
// cudaGraphicsMapResources // every time in the rendering loop
// cudaGraphicsResourceGetMappedPointer // every time in the rendering loop
// cudaGraphicsUnmapResources // every time in the rendering loop
// cudaGraphicsUnregisterResource // once after the vertex buffer has been destroyed
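	// Note: the device pointer obtained from cudaGraphicsResourceGetMappedPointer is
	// only valid between the matching Map/Unmap calls, and OpenGL must not touch the
	// buffer while it is mapped; resources should be unregistered before the GL
	// buffers (and context) are destroyed.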
struct cudaGraphicsResource* cuda_vbo_resource_1;
checkCudaErrors(cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource_1, (GLuint)GridMesh_1.m_VertexBuffer.m_RendererID, cudaGraphicsMapFlagsNone));
struct cudaGraphicsResource* cuda_vbo_resource_2;
checkCudaErrors(cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource_2, (GLuint)GridMesh_2.m_VertexBuffer.m_RendererID, cudaGraphicsMapFlagsNone));
struct cudaGraphicsResource* cuda_vbo_resource_3;
checkCudaErrors(cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource_3, (GLuint)GridMesh_3.m_VertexBuffer.m_RendererID, cudaGraphicsMapFlagsNone));
uint32_t blockSize = 320;
float timeSpeed = 0.0f; // PARAMETER initial time speed
	float timestep = 0.1f; // PARAMETER simulation timestep of the physics update
int counter = 0, draw_frequency = 10;
// Game loop
while (!glfwWindowShouldClose(appWindow.GetWindow()))
{
// appWindow.HandleUserInputs(observer, timestep);
		// Set the simulation speed; note that update quality degrades as the effective timestep grows
SetTimeSpeed(appWindow, timeSpeed);
// observer.SetObserverInShader(simpleShader);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (timeSpeed > 0.0f)
{
// --------------------- //
// do the CUDA part here //
			float3* dptr_1, * dptr_2, * dptr_3; // one device pointer per mapped VBO
size_t num_bytes;
checkCudaErrors(cudaGraphicsMapResources(1, &cuda_vbo_resource_1, 0));
checkCudaErrors(cudaGraphicsMapResources(1, &cuda_vbo_resource_2, 0));
checkCudaErrors(cudaGraphicsMapResources(1, &cuda_vbo_resource_3, 0));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&dptr_1, &num_bytes, cuda_vbo_resource_1));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&dptr_2, &num_bytes, cuda_vbo_resource_2));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&dptr_3, &num_bytes, cuda_vbo_resource_3));
// launch kernel here
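			// The three VBOs hold three consecutive time levels of the wave field:
			// each launch advances one step and the argument order rotates so that the
			// newest buffer feeds the next step, i.e. three steps are integrated per
			// rendered frame. (The exact read/write roles of the three pointers are
			// defined by the kernel in WaveEquationKernels.cuh, which is not shown here.)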
WaveEquation_kernel <<<gridElements / blockSize + 1, blockSize >>> (dptr_1, dptr_2, dptr_3, gridSize, timestep*timeSpeed);
WaveEquation_kernel <<<gridElements / blockSize + 1, blockSize >>> (dptr_2, dptr_3, dptr_1, gridSize, timestep*timeSpeed);
WaveEquation_kernel <<<gridElements / blockSize + 1, blockSize >>> (dptr_3, dptr_1, dptr_2, gridSize, timestep*timeSpeed);
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_vbo_resource_1, 0));
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_vbo_resource_2, 0));
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_vbo_resource_3, 0));
// --------------------- //
}
if (counter > draw_frequency)
{
appWindow.HandleUserInputs(observer, timestep);
observer.SetObserverInShader(simpleShader);
GridMesh_1.Draw();
glfwSwapBuffers(appWindow.GetWindow());
counter = 0;
}
counter++;
// Swap the screen buffers
// glfwSwapBuffers(appWindow.GetWindow());
}
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource_1));
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource_2));
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource_3));
// Terminates GLFW, clearing any resources allocated by GLFW.
glfwTerminate();
return 0;
}
|
46d38ddbb375ea0338d9bdfe3ae7a4dc390b9cc5.hip | // !!! This is a file automatically generated by hipify!!!
// This file is auto-generated. See "generate_kernels.sh"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::bfloat16_t, false, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::bfloat16_t, false, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::bfloat16_t, false, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::bfloat16_t, false, 128);
| 46d38ddbb375ea0338d9bdfe3ae7a4dc390b9cc5.cu | // This file is auto-generated. See "generate_kernels.sh"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::bfloat16_t, false, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::bfloat16_t, false, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::bfloat16_t, false, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::bfloat16_t, false, 128);
|
c1c61f7157523edf0ae7a8fbf859152d81b6afce.hip | // !!! This is a file automatically generated by hipify!!!
/**
*
* Date 03/07/2009
* ====
*
* Authors Vincent Garcia
* ======= Eric Debreuve
* Michel Barlaud
*
* Description Given a reference point set and a query point set, the program returns
 * =========== first the distance between each query point and its k nearest neighbors in
* the reference point set, and second the indexes of these k nearest neighbors.
* The computation is performed using the API NVIDIA CUDA.
*
* Paper Fast k nearest neighbor search using GPU
* =====
*
* BibTeX @INPROCEEDINGS{2008_garcia_cvgpu,
* ====== author = {V. Garcia and E. Debreuve and M. Barlaud},
* title = {Fast k nearest neighbor search using GPU},
* booktitle = {CVPR Workshop on Computer Vision on GPU},
* year = {2008},
* address = {Anchorage, Alaska, USA},
* month = {June}
* }
*
*/
// If the code is used in Matlab, set MATLAB_CODE to 1. Otherwise, set MATLAB_CODE to 0.
#define MATLAB_CODE 0
// Includes
#include <stdio.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "knn_cuda_with_indexes.h"
#if MATLAB_CODE == 1
#include "mex.h"
#else
#include <time.h>
#endif
// Constants used by the program
#define MAX_PITCH_VALUE_IN_BYTES 262144
#define MAX_TEXTURE_WIDTH_IN_BYTES 65536
#define MAX_TEXTURE_HEIGHT_IN_BYTES 32768
#define MAX_PART_OF_FREE_MEMORY_USED 0.9
#define BLOCK_DIM 16
// Texture containing the reference points (if it is possible)
texture<float, 2, hipReadModeElementType> texA;
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
* The matrix A is a texture.
*
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceTexture(int wA, float * B, int wB, int pB, int dim, float* AB){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ( xIndex<wB && yIndex<wA ){
float ssd = 0;
for (int i=0; i<dim; i++){
float tmp = tex2D(texA, (float)yIndex, (float)i) - B[ i * pB + xIndex ];
ssd += tmp * tmp;
}
AB[yIndex * pB + xIndex] = ssd;
// printf("IDX_X: %d IDX_Y: %d || B[0]: %f B[1]: %f | A[0]: %f A[1]: %f\n",xIndex,yIndex,B[xIndex],B[xIndex+pB],tex2D(texA, (float)yIndex, (float)0),tex2D(texA, (float)yIndex, (float)1));
}
}
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param pA pitch of matrix A given in number of columns
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceGlobal( float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB){
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * pA;
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else{
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
		// Compute the difference between the two matrices; each thread computes one element of the block sub-matrix
if (cond2 && cond1){
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
* The matrix A is a texture.
*
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
 * @param norm norm value p (used when inf is false)
 * @param inf whether to use the infinity norm (max for norm>0, min for norm<0) instead of the p-norm
*/
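/*
 * For inf == false the kernel computes the p-norm distance
 *     d(a,b) = ( sum_i |a_i - b_i|^p )^(1/p)   with p = norm;
 * for inf == true it computes max_i |a_i - b_i| when norm > 0
 * and min_i |a_i - b_i| when norm < 0.
 */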
__global__ void cuComputeNormTexture(int wA, float * B, int wB, int pB, int dim, float* AB, float norm, bool inf){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ( xIndex<wB && yIndex<wA ){
float ssd;
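		// For the min case (inf with norm < 0) the running minimum starts from a large
		// sentinel; 100.0f implicitly assumes every coordinate difference stays below 100.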
if (inf && (norm < 0)) {
ssd = 100.0f;
} else {
ssd = 0.0f;
}
for (int i=0; i<dim; i++){
float tmp = abs(tex2D(texA, (float)yIndex, (float)i) - B[ i * pB + xIndex ]);
if (inf) {
if (norm > 0) {
ssd = max(ssd,tmp);
} else {
ssd = min(ssd,tmp);
}
} else {
ssd += pow(tmp,norm);
}
}
if (!inf) {
//printf("%f\n",ssd);
ssd = pow(ssd,1/norm);
}
AB[yIndex * pB + xIndex] = ssd;
// printf("IDX_X: %d IDX_Y: %d || B[0]: %f B[1]: %f | A[0]: %f A[1]: %f\n",xIndex,yIndex,B[xIndex],B[xIndex+pB],tex2D(texA, (float)yIndex, (float)0),tex2D(texA, (float)yIndex, (float)1));
}
}
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param pA pitch of matrix A given in number of columns
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
 * @param norm norm value p (used when inf is false)
 * @param inf whether to use the infinity norm (max for norm>0, min for norm<0) instead of the p-norm
*/
__global__ void cuComputeNormGlobal( float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB, float norm, bool inf){
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd;
if (inf && (norm < 0)) {
ssd = 100.0f;
} else {
ssd = 0.0f;
}
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * pA;
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else{
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
		// Compute the difference between the two matrices; each thread computes one element of the block sub-matrix
if (cond2 && cond1){
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = abs(shared_A[k][ty] - shared_B[k][tx]);
if (inf) {
if (norm > 0) {
ssd = max(ssd,tmp);
} else {
ssd = min(ssd,tmp);
}
} else {
ssd += pow(tmp,norm);
}
}
if (!inf) {
ssd = pow(ssd,1/norm);
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
/**
 * Gathers the k smallest distances of each column of the distance matrix at the top of that column.
*
* @param dist distance matrix
* @param dist_pitch pitch of the distance matrix given in number of columns
* @param ind index matrix
* @param ind_pitch pitch of the index matrix given in number of columns
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
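/*
 * Each thread owns one column of the distance matrix: part 1 insertion-sorts the
 * first k distances, part 2 then inserts every remaining distance into that sorted
 * top-k list. The stored indexes are 1-based (l+1) so they can be used directly in Matlab.
 */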
__global__ void cuInsertionSort(float *dist, int dist_pitch, int *ind, int ind_pitch, int width, int height, int k){
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
// Pointer shift, initialization, and max value
p_dist = dist + xIndex;
p_ind = ind + xIndex;
max_dist = p_dist[0];
p_ind[0] = 1;
		// Part 1 : sort the k first elements
for (l=1; l<k; l++){
curr_row = l * dist_pitch;
curr_dist = p_dist[curr_row];
if (curr_dist<max_dist){
i=l-1;
for (int a=0; a<l-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
for (j=l; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
}
else
p_ind[l*ind_pitch] = l+1;
max_dist = p_dist[curr_row];
}
		// Part 2 : insert the remaining elements into the k first lines
max_row = (k-1)*dist_pitch;
for (l=k; l<height; l++){
curr_dist = p_dist[l*dist_pitch];
if (curr_dist<max_dist){
i=k-1;
for (int a=0; a<k-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
for (j=k-1; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
max_dist = p_dist[max_row];
}
}
}
}
/**
 * Computes the square root of the k first lines (width elements each)
 * of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param pitch pitch of the distance matrix given in number of columns
* @param k number of neighbors to consider
*/
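/*
 * Only the k selected rows need the square root: since sqrt is monotonic, sorting
 * the squared distances yields the same order as sorting the true distances, so the
 * sqrt is deferred until after cuInsertionSort has kept the k best candidates.
 */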
__global__ void cuParallelSqrt(float *dist, int width, int pitch, int k){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
dist[yIndex*pitch + xIndex] = sqrt(dist[yIndex*pitch + xIndex]);
}
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
 * Prints the error message returned during the memory allocation.
 *
 * @param error error value returned by the memory allocation function
 * @param memorySize size of the memory whose allocation failed
*/
void knn_cuda_with_indexes::printErrorMessage(hipError_t error, int memorySize){
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", hipGetErrorString(error));
printf("Whished allocated memory : %d\n", memorySize);
printf("==================================================\n");
#if MATLAB_CODE == 1
mexErrMsgTxt("CUDA ERROR DURING MEMORY ALLOCATION");
#endif
}
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distances + indexes to the k nearest neighbors for each query point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
 * @param k number of neighbors to consider
 * @param dist_host distances to k nearest neighbors ; pointer to linear matrix
 * @param ind_host indexes of the k nearest neighbors ; pointer to linear matrix
*
*/
void knn_cuda_with_indexes::knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, int* ind_host){
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Variables
float *query_dev;
float *ref_dev;
float *dist_dev;
int *ind_dev;
hipArray *ref_array;
hipError_t result;
size_t query_pitch;
size_t query_pitch_in_bytes;
size_t ref_pitch;
size_t ref_pitch_in_bytes;
size_t ind_pitch;
size_t ind_pitch_in_bytes;
size_t max_nb_query_traited;
size_t actual_nb_query_width;
unsigned long memory_total;
unsigned long memory_free;
// Check if we can use texture memory for reference points
unsigned int use_texture = ( ref_width*size_of_float<=MAX_TEXTURE_WIDTH_IN_BYTES && height*size_of_float<=MAX_TEXTURE_HEIGHT_IN_BYTES );
// CUDA Initialisation
hipInit(0);
// Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used
hipCtx_t cuContext;
hipDevice_t cuDevice=0;
hipCtxCreate(&cuContext, 0, cuDevice);
cuMemGetInfo(&memory_free, &memory_total);
hipCtxDetach (cuContext);
	// Determine maximum number of queries that can be treated
max_nb_query_traited = ( memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width*height ) / ( size_of_float * (height + ref_width) + size_of_int * k);
max_nb_query_traited = min( (unsigned long)query_width, (max_nb_query_traited / 16) * 16 );
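	// Budget above: each treated query column needs height floats (the query point),
	// ref_width floats (its distance column) and k ints (its index column), while the
	// reference matrix itself takes ref_width*height floats; rounding down to a
	// multiple of 16 keeps the workload aligned with the 16x16 thread blocks.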
// Allocation of global memory for query points and for distances
result = hipMallocPitch( (void **) &query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, height + ref_width);
if (result){
knn_cuda_with_indexes::printErrorMessage(result, max_nb_query_traited*size_of_float*(height+ref_width));
return;
}
query_pitch = query_pitch_in_bytes/size_of_float;
dist_dev = query_dev + height * query_pitch;
// Allocation of global memory for indexes
result = hipMallocPitch( (void **) &ind_dev, &ind_pitch_in_bytes, max_nb_query_traited * size_of_int, k);
if (result){
hipFree(query_dev);
knn_cuda_with_indexes::printErrorMessage(result, max_nb_query_traited*size_of_int*k);
return;
}
ind_pitch = ind_pitch_in_bytes/size_of_int;
// Allocation of memory (global or texture) for reference points
if (use_texture){
// Allocation of texture memory
hipChannelFormatDesc channelDescA = hipCreateChannelDesc<float>();
result = hipMallocArray( &ref_array, &channelDescA, ref_width, height );
if (result){
knn_cuda_with_indexes::printErrorMessage(result, ref_width*height*size_of_float);
hipFree(ind_dev);
hipFree(query_dev);
return;
}
hipMemcpyToArray( ref_array, 0, 0, ref_host, ref_width * height * size_of_float, hipMemcpyHostToDevice );
// Set texture parameters and bind texture to array
texA.addressMode[0] = hipAddressModeClamp;
texA.addressMode[1] = hipAddressModeClamp;
texA.filterMode = hipFilterModePoint;
texA.normalized = 0;
hipBindTextureToArray(texA, ref_array);
}
else{
// Allocation of global memory
result = hipMallocPitch( (void **) &ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height);
if (result){
knn_cuda_with_indexes::printErrorMessage(result, ref_width*size_of_float*height);
hipFree(ind_dev);
hipFree(query_dev);
return;
}
ref_pitch = ref_pitch_in_bytes/size_of_float;
hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, hipMemcpyHostToDevice);
}
// Split queries to fit in GPU memory
for (int i=0; i<query_width; i+=max_nb_query_traited){
// Number of query points considered
actual_nb_query_width = min( (unsigned long)max_nb_query_traited, (unsigned long)(query_width-i) );
// Copy of part of query actually being treated
hipMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, hipMemcpyHostToDevice);
		// Grids and threads
dim3 g_16x16(actual_nb_query_width/16, ref_width/16, 1);
dim3 t_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_16x16.x += 1;
if (ref_width %16 != 0) g_16x16.y += 1;
//
dim3 g_256x1(actual_nb_query_width/256, 1, 1);
dim3 t_256x1(256, 1, 1);
if (actual_nb_query_width%256 != 0) g_256x1.x += 1;
//
dim3 g_k_16x16(actual_nb_query_width/16, k/16, 1);
dim3 t_k_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_k_16x16.x += 1;
if (k %16 != 0) g_k_16x16.y += 1;
// Kernel 1: Compute all the distances
if (use_texture)
hipLaunchKernelGGL(( cuComputeDistanceTexture), dim3(g_16x16),dim3(t_16x16), 0, 0, ref_width, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);
else
hipLaunchKernelGGL(( cuComputeDistanceGlobal), dim3(g_16x16),dim3(t_16x16), 0, 0, ref_dev, ref_width, ref_pitch, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);
// Kernel 2: Sort each column
hipLaunchKernelGGL(( cuInsertionSort), dim3(g_256x1),dim3(t_256x1), 0, 0, dist_dev, query_pitch, ind_dev, ind_pitch, actual_nb_query_width, ref_width, k);
// Kernel 3: Compute square root of k first elements
hipLaunchKernelGGL(( cuParallelSqrt), dim3(g_k_16x16),dim3(t_k_16x16), 0, 0, dist_dev, query_width, query_pitch, k);
// Memory copy of output from device to host
hipMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev, query_pitch_in_bytes, actual_nb_query_width*size_of_float, k, hipMemcpyDeviceToHost);
hipMemcpy2D(&ind_host[i], query_width*size_of_int, ind_dev, ind_pitch_in_bytes, actual_nb_query_width*size_of_int, k, hipMemcpyDeviceToHost);
}
// Free memory
if (use_texture)
hipFreeArray(ref_array);
else
hipFree(ref_dev);
hipFree(ind_dev);
hipFree(query_dev);
}
void knn_cuda_with_indexes::computeDistances(float *ref_host, int ref_width, float *query_host, int query_width, int height, float *dist_host, bool inf, float norm) {
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Variables
float *query_dev;
float *ref_dev;
float *dist_dev;
hipArray *ref_array;
hipError_t result;
size_t query_pitch;
size_t query_pitch_in_bytes;
size_t ref_pitch;
size_t ref_pitch_in_bytes;
size_t max_nb_query_traited;
size_t actual_nb_query_width;
unsigned long memory_total;
unsigned long memory_free;
// Check if we can use texture memory for reference points
unsigned int use_texture = ( ref_width*size_of_float<=MAX_TEXTURE_WIDTH_IN_BYTES && height*size_of_float<=MAX_TEXTURE_HEIGHT_IN_BYTES );
// CUDA Initialisation
hipInit(0);
// Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used
hipCtx_t cuContext;
hipDevice_t cuDevice=0;
hipCtxCreate(&cuContext, 0, cuDevice);
cuMemGetInfo(&memory_free, &memory_total);
hipCtxDetach (cuContext);
	// Determine maximum number of queries that can be treated
max_nb_query_traited = ( memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width*height ) / ( size_of_float * (height + ref_width) + size_of_int * ref_width);
max_nb_query_traited = min( (unsigned long)query_width, (max_nb_query_traited / 16) * 16 );
// Allocation of global memory for query points and for distances
result = hipMallocPitch( (void **) &query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, height + ref_width);
if (result){
knn_cuda_with_indexes::printErrorMessage(result, max_nb_query_traited*size_of_float*(height+ref_width));
return;
}
query_pitch = query_pitch_in_bytes/size_of_float;
dist_dev = query_dev + height * query_pitch;
// Allocation of memory (global or texture) for reference points
if (use_texture){
// Allocation of texture memory
hipChannelFormatDesc channelDescA = hipCreateChannelDesc<float>();
result = hipMallocArray( &ref_array, &channelDescA, ref_width, height );
if (result){
knn_cuda_with_indexes::printErrorMessage(result, ref_width*height*size_of_float);
hipFree(query_dev);
return;
}
hipMemcpyToArray( ref_array, 0, 0, ref_host, ref_width * height * size_of_float, hipMemcpyHostToDevice );
// Set texture parameters and bind texture to array
texA.addressMode[0] = hipAddressModeClamp;
texA.addressMode[1] = hipAddressModeClamp;
texA.filterMode = hipFilterModePoint;
texA.normalized = 0;
hipBindTextureToArray(texA, ref_array);
} else{
// Allocation of global memory
result = hipMallocPitch( (void **) &ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height);
if (result){
knn_cuda_with_indexes::printErrorMessage(result, ref_width*size_of_float*height);
hipFree(query_dev);
return;
}
ref_pitch = ref_pitch_in_bytes/size_of_float;
hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, hipMemcpyHostToDevice);
}
// Split queries to fit in GPU memory
for (int i=0; i<query_width; i+=max_nb_query_traited){
// Number of query points considered
actual_nb_query_width = min( (unsigned long)max_nb_query_traited, (unsigned long)(query_width-i) );
// Copy of part of query actually being treated
hipMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, hipMemcpyHostToDevice);
		// Grids and threads
dim3 g_16x16(actual_nb_query_width/16, ref_width/16, 1);
dim3 t_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_16x16.x += 1;
if (ref_width %16 != 0) g_16x16.y += 1;
		// (the 256x1 and k-specific launch grids used in knn() are not needed here)
// Kernel 1: Compute all the distances
if (use_texture)
hipLaunchKernelGGL(( cuComputeNormTexture), dim3(g_16x16),dim3(t_16x16), 0, 0, ref_width, query_dev, actual_nb_query_width, query_pitch, height, dist_dev, norm, inf);
else
hipLaunchKernelGGL(( cuComputeNormGlobal), dim3(g_16x16),dim3(t_16x16), 0, 0, ref_dev, ref_width, ref_pitch, query_dev, actual_nb_query_width, query_pitch, height, dist_dev, norm, inf);
// Memory copy of output from device to host
hipMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev, query_pitch_in_bytes, actual_nb_query_width*size_of_float, ref_width, hipMemcpyDeviceToHost);
}
// Free memory
if (use_texture)
hipFreeArray(ref_array);
else
hipFree(ref_dev);
hipFree(query_dev);
}
//-----------------------------------------------------------------------------------------------//
// MATLAB INTERFACE & C EXAMPLE //
//-----------------------------------------------------------------------------------------------//
#if MATLAB_CODE == 1
/**
* Interface to use CUDA code in Matlab (gateway routine).
*
* @param nlhs Number of expected mxArrays (Left Hand Side)
* @param plhs Array of pointers to expected outputs
* @param nrhs Number of inputs (Right Hand Side)
* @param prhs Array of pointers to input data. The input data is read-only and should not be altered by your mexFunction .
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
// Variables
float* ref;
int ref_width;
int ref_height;
float* query;
int query_width;
int query_height;
float* dist;
int* ind;
int k;
// Reference points
ref = (float *) mxGetData(prhs[0]);
ref_width = mxGetM(prhs[0]);
ref_height = mxGetN(prhs[0]);
// Query points
query = (float *) mxGetData(prhs[1]);
query_width = mxGetM(prhs[1]);
query_height = mxGetN(prhs[1]);
// Number of neighbors to consider
k = (int)mxGetScalar(prhs[2]);
// Verification of the reference point and query point sizes
if (ref_height!=query_height)
mexErrMsgTxt("Data must have the same dimension");
if (ref_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Reference number is too large for CUDA (Max=65536)");
if (query_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Query number is too large for CUDA (Max=65536)");
// Allocation of output arrays
dist = (float *) mxGetPr(plhs[0] = mxCreateNumericMatrix(query_width, k, mxSINGLE_CLASS, mxREAL));
ind = (int *) mxGetPr(plhs[1] = mxCreateNumericMatrix(query_width, k, mxINT32_CLASS, mxREAL));
// Call KNN CUDA
knn(ref, ref_width, query, query_width, ref_height, k, dist, ind);
}
#else // C code
/**
* Example of use of kNN search CUDA.
*/
//int main(void){
// // Variables and parameters
// float* ref; // Pointer to reference point array
// float* query; // Pointer to query point array
// float* dist; // Pointer to distance array
// int* ind; // Pointer to index array
// int ref_nb = 4096; // Reference point number, max=65535
// int query_nb = 4096; // Query point number, max=65535
// int dim = 32; // Dimension of points
// int k = 20; // Nearest neighbors to consider
// int iterations = 100;
// int i;
// // Memory allocation
// ref = (float *) malloc(ref_nb * dim * sizeof(float));
// query = (float *) malloc(query_nb * dim * sizeof(float));
// dist = (float *) malloc(query_nb * k * sizeof(float));
// ind = (int *) malloc(query_nb * k * sizeof(float));
// // Init
// srand(time(NULL));
// for (i=0 ; i<ref_nb * dim ; i++) ref[i] = (float)rand() / (float)RAND_MAX;
// for (i=0 ; i<query_nb * dim ; i++) query[i] = (float)rand() / (float)RAND_MAX;
// // Variables for duration evaluation
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// float elapsed_time;
// // Display informations
// printf("Number of reference points : %6d\n", ref_nb );
// printf("Number of query points : %6d\n", query_nb);
// printf("Dimension of points : %4d\n", dim );
// printf("Number of neighbors to consider : %4d\n", k );
// printf("Processing kNN search :" );
// // Call kNN search CUDA
// hipEventRecord(start, 0);
// for (i=0; i<iterations; i++)
// knn(ref, ref_nb, query, query_nb, dim, k, dist, ind);
// hipEventRecord(stop, 0);
// hipEventSynchronize(stop);
// hipEventElapsedTime(&elapsed_time, start, stop);
// printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time/1000, iterations, elapsed_time/(iterations*1000));
// // Destroy cuda event object and free memory
// hipEventDestroy(start);
// hipEventDestroy(stop);
// free(ind);
// free(dist);
// free(query);
// free(ref);
//}
#endif
| c1c61f7157523edf0ae7a8fbf859152d81b6afce.cu | /**
*
* Date 03/07/2009
* ====
*
* Authors Vincent Garcia
* ======= Eric Debreuve
* Michel Barlaud
*
* Description Given a reference point set and a query point set, the program returns
* =========== firts the distance between each query point and its k nearest neighbors in
* the reference point set, and second the indexes of these k nearest neighbors.
* The computation is performed using the API NVIDIA CUDA.
*
* Paper Fast k nearest neighbor search using GPU
* =====
*
* BibTeX @INPROCEEDINGS{2008_garcia_cvgpu,
* ====== author = {V. Garcia and E. Debreuve and M. Barlaud},
* title = {Fast k nearest neighbor search using GPU},
* booktitle = {CVPR Workshop on Computer Vision on GPU},
* year = {2008},
* address = {Anchorage, Alaska, USA},
* month = {June}
* }
*
*/
// If the code is used in Matlab, set MATLAB_CODE to 1. Otherwise, set MATLAB_CODE to 0.
#define MATLAB_CODE 0
// Includes
#include <stdio.h>
#include <math.h>
#include "cuda.h"
#include "knn_cuda_with_indexes.h"
#if MATLAB_CODE == 1
#include "mex.h"
#else
#include <time.h>
#endif
// Constants used by the program
#define MAX_PITCH_VALUE_IN_BYTES 262144
#define MAX_TEXTURE_WIDTH_IN_BYTES 65536
#define MAX_TEXTURE_HEIGHT_IN_BYTES 32768
#define MAX_PART_OF_FREE_MEMORY_USED 0.9
#define BLOCK_DIM 16
// Texture containing the reference points (if it is possible)
texture<float, 2, cudaReadModeElementType> texA;
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
* The matrix A is a texture.
*
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceTexture(int wA, float * B, int wB, int pB, int dim, float* AB){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ( xIndex<wB && yIndex<wA ){
float ssd = 0;
for (int i=0; i<dim; i++){
float tmp = tex2D(texA, (float)yIndex, (float)i) - B[ i * pB + xIndex ];
ssd += tmp * tmp;
}
AB[yIndex * pB + xIndex] = ssd;
// printf("IDX_X: %d IDX_Y: %d || B[0]: %f B[1]: %f | A[0]: %f A[1]: %f\n",xIndex,yIndex,B[xIndex],B[xIndex+pB],tex2D(texA, (float)yIndex, (float)0),tex2D(texA, (float)yIndex, (float)1));
}
}
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param pA pitch of matrix A given in number of columns
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceGlobal( float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB){
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * pA;
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else{
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
		// Compute the difference between the two matrices; each thread computes one element of the block sub-matrix
if (cond2 && cond1){
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
* The matrix A is a texture.
*
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
 * @param norm norm value p (used when inf is false)
 * @param inf whether to use the infinity norm (max for norm>0, min for norm<0) instead of the p-norm
*/
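/*
 * For inf == false the kernel computes the p-norm distance
 *     d(a,b) = ( sum_i |a_i - b_i|^p )^(1/p)   with p = norm;
 * for inf == true it computes max_i |a_i - b_i| when norm > 0
 * and min_i |a_i - b_i| when norm < 0.
 */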
__global__ void cuComputeNormTexture(int wA, float * B, int wB, int pB, int dim, float* AB, float norm, bool inf){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ( xIndex<wB && yIndex<wA ){
float ssd;
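		// For the min case (inf with norm < 0) the running minimum starts from a large
		// sentinel; 100.0f implicitly assumes every coordinate difference stays below 100.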
if (inf && (norm < 0)) {
ssd = 100.0f;
} else {
ssd = 0.0f;
}
for (int i=0; i<dim; i++){
float tmp = abs(tex2D(texA, (float)yIndex, (float)i) - B[ i * pB + xIndex ]);
if (inf) {
if (norm > 0) {
ssd = max(ssd,tmp);
} else {
ssd = min(ssd,tmp);
}
} else {
ssd += pow(tmp,norm);
}
}
if (!inf) {
//printf("%f\n",ssd);
ssd = pow(ssd,1/norm);
}
AB[yIndex * pB + xIndex] = ssd;
// printf("IDX_X: %d IDX_Y: %d || B[0]: %f B[1]: %f | A[0]: %f A[1]: %f\n",xIndex,yIndex,B[xIndex],B[xIndex+pB],tex2D(texA, (float)yIndex, (float)0),tex2D(texA, (float)yIndex, (float)1));
}
}
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param pA pitch of matrix A given in number of columns
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
 * @param norm norm value p (used when inf is false)
 * @param inf whether to use the infinity norm (max for norm>0, min for norm<0) instead of the p-norm
*/
__global__ void cuComputeNormGlobal( float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB, float norm, bool inf){
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd;
if (inf && (norm < 0)) {
ssd = 100.0f;
} else {
ssd = 0.0f;
}
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * pA;
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else{
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
		// Compute the difference between the two matrices; each thread computes one element of the block sub-matrix
if (cond2 && cond1){
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = abs(shared_A[k][ty] - shared_B[k][tx]);
if (inf) {
if (norm > 0) {
ssd = max(ssd,tmp);
} else {
ssd = min(ssd,tmp);
}
} else {
ssd += pow(tmp,norm);
}
}
if (!inf) {
ssd = pow(ssd,1/norm);
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
/**
 * Gathers the k smallest distances of each column of the distance matrix at the top of that column.
*
* @param dist distance matrix
* @param dist_pitch pitch of the distance matrix given in number of columns
* @param ind index matrix
* @param ind_pitch pitch of the index matrix given in number of columns
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
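/*
 * Each thread owns one column of the distance matrix: part 1 insertion-sorts the
 * first k distances, part 2 then inserts every remaining distance into that sorted
 * top-k list. The stored indexes are 1-based (l+1) so they can be used directly in Matlab.
 */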
__global__ void cuInsertionSort(float *dist, int dist_pitch, int *ind, int ind_pitch, int width, int height, int k){
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
// Pointer shift, initialization, and max value
p_dist = dist + xIndex;
p_ind = ind + xIndex;
max_dist = p_dist[0];
p_ind[0] = 1;
		// Part 1 : sort the k first elements
for (l=1; l<k; l++){
curr_row = l * dist_pitch;
curr_dist = p_dist[curr_row];
if (curr_dist<max_dist){
i=l-1;
for (int a=0; a<l-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
for (j=l; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
}
else
p_ind[l*ind_pitch] = l+1;
max_dist = p_dist[curr_row];
}
		// Part 2 : insert the remaining elements into the k first lines
max_row = (k-1)*dist_pitch;
for (l=k; l<height; l++){
curr_dist = p_dist[l*dist_pitch];
if (curr_dist<max_dist){
i=k-1;
for (int a=0; a<k-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
for (j=k-1; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
max_dist = p_dist[max_row];
}
}
}
}
/**
 * Computes the square root of the k first lines (width elements each)
 * of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param pitch pitch of the distance matrix given in number of columns
* @param k number of neighbors to consider
*/
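/*
 * Only the k selected rows need the square root: since sqrt is monotonic, sorting
 * the squared distances yields the same order as sorting the true distances, so the
 * sqrt is deferred until after cuInsertionSort has kept the k best candidates.
 */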
__global__ void cuParallelSqrt(float *dist, int width, int pitch, int k){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
dist[yIndex*pitch + xIndex] = sqrt(dist[yIndex*pitch + xIndex]);
}
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
 * Prints the error message returned during the memory allocation.
 *
 * @param error error value returned by the memory allocation function
 * @param memorySize size of the memory whose allocation failed
*/
void knn_cuda_with_indexes::printErrorMessage(cudaError_t error, int memorySize){
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", cudaGetErrorString(error));
printf("Whished allocated memory : %d\n", memorySize);
printf("==================================================\n");
#if MATLAB_CODE == 1
mexErrMsgTxt("CUDA ERROR DURING MEMORY ALLOCATION");
#endif
}
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distances + indexes to the k nearest neighbors for each query point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
 * @param k number of neighbors to consider
 * @param dist_host distances to k nearest neighbors ; pointer to linear matrix
 * @param ind_host indexes of the k nearest neighbors ; pointer to linear matrix
*
*/
void knn_cuda_with_indexes::knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, int* ind_host){
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Variables
float *query_dev;
float *ref_dev;
float *dist_dev;
int *ind_dev;
cudaArray *ref_array;
cudaError_t result;
size_t query_pitch;
size_t query_pitch_in_bytes;
size_t ref_pitch;
size_t ref_pitch_in_bytes;
size_t ind_pitch;
size_t ind_pitch_in_bytes;
size_t max_nb_query_traited;
size_t actual_nb_query_width;
unsigned long memory_total;
unsigned long memory_free;
// Check if we can use texture memory for reference points
unsigned int use_texture = ( ref_width*size_of_float<=MAX_TEXTURE_WIDTH_IN_BYTES && height*size_of_float<=MAX_TEXTURE_HEIGHT_IN_BYTES );
// CUDA Initialisation
cuInit(0);
// Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used
CUcontext cuContext;
CUdevice cuDevice=0;
cuCtxCreate(&cuContext, 0, cuDevice);
cuMemGetInfo(&memory_free, &memory_total);
cuCtxDetach (cuContext);
	// Determine maximum number of queries that can be treated
max_nb_query_traited = ( memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width*height ) / ( size_of_float * (height + ref_width) + size_of_int * k);
max_nb_query_traited = min( (unsigned long)query_width, (max_nb_query_traited / 16) * 16 );
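	// Budget above: each treated query column needs height floats (the query point),
	// ref_width floats (its distance column) and k ints (its index column), while the
	// reference matrix itself takes ref_width*height floats; rounding down to a
	// multiple of 16 keeps the workload aligned with the 16x16 thread blocks.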
// Allocation of global memory for query points and for distances
result = cudaMallocPitch( (void **) &query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, height + ref_width);
if (result){
knn_cuda_with_indexes::printErrorMessage(result, max_nb_query_traited*size_of_float*(height+ref_width));
return;
}
query_pitch = query_pitch_in_bytes/size_of_float;
dist_dev = query_dev + height * query_pitch;
// Allocation of global memory for indexes
result = cudaMallocPitch( (void **) &ind_dev, &ind_pitch_in_bytes, max_nb_query_traited * size_of_int, k);
if (result){
cudaFree(query_dev);
knn_cuda_with_indexes::printErrorMessage(result, max_nb_query_traited*size_of_int*k);
return;
}
ind_pitch = ind_pitch_in_bytes/size_of_int;
// Allocation of memory (global or texture) for reference points
if (use_texture){
// Allocation of texture memory
cudaChannelFormatDesc channelDescA = cudaCreateChannelDesc<float>();
result = cudaMallocArray( &ref_array, &channelDescA, ref_width, height );
if (result){
knn_cuda_with_indexes::printErrorMessage(result, ref_width*height*size_of_float);
cudaFree(ind_dev);
cudaFree(query_dev);
return;
}
cudaMemcpyToArray( ref_array, 0, 0, ref_host, ref_width * height * size_of_float, cudaMemcpyHostToDevice );
// Set texture parameters and bind texture to array
texA.addressMode[0] = cudaAddressModeClamp;
texA.addressMode[1] = cudaAddressModeClamp;
texA.filterMode = cudaFilterModePoint;
texA.normalized = 0;
cudaBindTextureToArray(texA, ref_array);
}
else{
// Allocation of global memory
result = cudaMallocPitch( (void **) &ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height);
if (result){
knn_cuda_with_indexes::printErrorMessage(result, ref_width*size_of_float*height);
cudaFree(ind_dev);
cudaFree(query_dev);
return;
}
ref_pitch = ref_pitch_in_bytes/size_of_float;
cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, cudaMemcpyHostToDevice);
}
// Split queries to fit in GPU memory
for (int i=0; i<query_width; i+=max_nb_query_traited){
// Number of query points considered
actual_nb_query_width = min( (unsigned long)max_nb_query_traited, (unsigned long)(query_width-i) );
// Copy of part of query actually being treated
cudaMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, cudaMemcpyHostToDevice);
		// Grids and threads
dim3 g_16x16(actual_nb_query_width/16, ref_width/16, 1);
dim3 t_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_16x16.x += 1;
if (ref_width %16 != 0) g_16x16.y += 1;
//
dim3 g_256x1(actual_nb_query_width/256, 1, 1);
dim3 t_256x1(256, 1, 1);
if (actual_nb_query_width%256 != 0) g_256x1.x += 1;
//
dim3 g_k_16x16(actual_nb_query_width/16, k/16, 1);
dim3 t_k_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_k_16x16.x += 1;
if (k %16 != 0) g_k_16x16.y += 1;
// Kernel 1: Compute all the distances
if (use_texture)
cuComputeDistanceTexture<<<g_16x16,t_16x16>>>(ref_width, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);
else
cuComputeDistanceGlobal<<<g_16x16,t_16x16>>>(ref_dev, ref_width, ref_pitch, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);
// Kernel 2: Sort each column
cuInsertionSort<<<g_256x1,t_256x1>>>(dist_dev, query_pitch, ind_dev, ind_pitch, actual_nb_query_width, ref_width, k);
// Kernel 3: Compute square root of k first elements
cuParallelSqrt<<<g_k_16x16,t_k_16x16>>>(dist_dev, query_width, query_pitch, k);
// Memory copy of output from device to host
cudaMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev, query_pitch_in_bytes, actual_nb_query_width*size_of_float, k, cudaMemcpyDeviceToHost);
cudaMemcpy2D(&ind_host[i], query_width*size_of_int, ind_dev, ind_pitch_in_bytes, actual_nb_query_width*size_of_int, k, cudaMemcpyDeviceToHost);
}
// Free memory
if (use_texture)
cudaFreeArray(ref_array);
else
cudaFree(ref_dev);
cudaFree(ind_dev);
cudaFree(query_dev);
}
void knn_cuda_with_indexes::computeDistances(float *ref_host, int ref_width, float *query_host, int query_width, int height, float *dist_host, bool inf, float norm) {
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Variables
float *query_dev;
float *ref_dev;
float *dist_dev;
cudaArray *ref_array;
cudaError_t result;
size_t query_pitch;
size_t query_pitch_in_bytes;
size_t ref_pitch;
size_t ref_pitch_in_bytes;
size_t max_nb_query_traited;
size_t actual_nb_query_width;
unsigned long memory_total;
unsigned long memory_free;
// Check if we can use texture memory for reference points
unsigned int use_texture = ( ref_width*size_of_float<=MAX_TEXTURE_WIDTH_IN_BYTES && height*size_of_float<=MAX_TEXTURE_HEIGHT_IN_BYTES );
// CUDA Initialisation
cuInit(0);
// Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used
CUcontext cuContext;
CUdevice cuDevice=0;
cuCtxCreate(&cuContext, 0, cuDevice);
cuMemGetInfo(&memory_free, &memory_total);
cuCtxDetach (cuContext);
	// Determine maximum number of queries that can be treated
max_nb_query_traited = ( memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width*height ) / ( size_of_float * (height + ref_width) + size_of_int * ref_width);
max_nb_query_traited = min( (unsigned long)query_width, (max_nb_query_traited / 16) * 16 );
// Allocation of global memory for query points and for distances
result = cudaMallocPitch( (void **) &query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, height + ref_width);
if (result){
knn_cuda_with_indexes::printErrorMessage(result, max_nb_query_traited*size_of_float*(height+ref_width));
return;
}
query_pitch = query_pitch_in_bytes/size_of_float;
dist_dev = query_dev + height * query_pitch;
// Allocation of memory (global or texture) for reference points
if (use_texture){
// Allocation of texture memory
cudaChannelFormatDesc channelDescA = cudaCreateChannelDesc<float>();
result = cudaMallocArray( &ref_array, &channelDescA, ref_width, height );
if (result){
knn_cuda_with_indexes::printErrorMessage(result, ref_width*height*size_of_float);
cudaFree(query_dev);
return;
}
cudaMemcpyToArray( ref_array, 0, 0, ref_host, ref_width * height * size_of_float, cudaMemcpyHostToDevice );
// Set texture parameters and bind texture to array
texA.addressMode[0] = cudaAddressModeClamp;
texA.addressMode[1] = cudaAddressModeClamp;
texA.filterMode = cudaFilterModePoint;
texA.normalized = 0;
cudaBindTextureToArray(texA, ref_array);
} else{
// Allocation of global memory
result = cudaMallocPitch( (void **) &ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height);
if (result){
knn_cuda_with_indexes::printErrorMessage(result, ref_width*size_of_float*height);
cudaFree(query_dev);
return;
}
ref_pitch = ref_pitch_in_bytes/size_of_float;
cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, cudaMemcpyHostToDevice);
}
// Split queries to fit in GPU memory
for (int i=0; i<query_width; i+=max_nb_query_traited){
// Number of query points considered
actual_nb_query_width = min( (unsigned long)max_nb_query_traited, (unsigned long)(query_width-i) );
// Copy of part of query actually being treated
cudaMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, cudaMemcpyHostToDevice);
		// Grids and threads
dim3 g_16x16(actual_nb_query_width/16, ref_width/16, 1);
dim3 t_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_16x16.x += 1;
if (ref_width %16 != 0) g_16x16.y += 1;
//
dim3 g_256x1(actual_nb_query_width/256, 1, 1);
if (actual_nb_query_width%256 != 0) g_256x1.x += 1;
//
dim3 g_k_16x16(actual_nb_query_width/16, ref_width/16, 1);
dim3 t_k_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_k_16x16.x += 1;
if (ref_width %16 != 0) g_k_16x16.y += 1;
// Kernel 1: Compute all the distances
if (use_texture)
cuComputeNormTexture<<<g_16x16,t_16x16>>>(ref_width, query_dev, actual_nb_query_width, query_pitch, height, dist_dev, norm, inf);
else
cuComputeNormGlobal<<<g_16x16,t_16x16>>>(ref_dev, ref_width, ref_pitch, query_dev, actual_nb_query_width, query_pitch, height, dist_dev, norm, inf);
// Memory copy of output from device to host
cudaMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev, query_pitch_in_bytes, actual_nb_query_width*size_of_float, ref_width, cudaMemcpyDeviceToHost);
}
// Free memory
if (use_texture)
cudaFreeArray(ref_array);
else
cudaFree(ref_dev);
cudaFree(query_dev);
}
//-----------------------------------------------------------------------------------------------//
// MATLAB INTERFACE & C EXAMPLE //
//-----------------------------------------------------------------------------------------------//
#if MATLAB_CODE == 1
/**
* Interface to use CUDA code in Matlab (gateway routine).
*
* @param nlhs Number of expected mxArrays (Left Hand Side)
* @param plhs Array of pointers to expected outputs
* @param nrhs Number of inputs (Right Hand Side)
* @param prhs Array of pointers to input data. The input data is read-only and should not be altered by your mexFunction .
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
// Variables
float* ref;
int ref_width;
int ref_height;
float* query;
int query_width;
int query_height;
float* dist;
int* ind;
int k;
// Reference points
ref = (float *) mxGetData(prhs[0]);
ref_width = mxGetM(prhs[0]);
ref_height = mxGetN(prhs[0]);
// Query points
query = (float *) mxGetData(prhs[1]);
query_width = mxGetM(prhs[1]);
query_height = mxGetN(prhs[1]);
// Number of neighbors to consider
k = (int)mxGetScalar(prhs[2]);
// Verification of the reference point and query point sizes
if (ref_height!=query_height)
mexErrMsgTxt("Data must have the same dimension");
if (ref_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Reference number is too large for CUDA (Max=65536)");
if (query_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Query number is too large for CUDA (Max=65536)");
// Allocation of output arrays
dist = (float *) mxGetPr(plhs[0] = mxCreateNumericMatrix(query_width, k, mxSINGLE_CLASS, mxREAL));
ind = (int *) mxGetPr(plhs[1] = mxCreateNumericMatrix(query_width, k, mxINT32_CLASS, mxREAL));
// Call KNN CUDA
knn(ref, ref_width, query, query_width, ref_height, k, dist, ind);
}
#else // C code
/**
* Example of use of kNN search CUDA.
*/
//int main(void){
// // Variables and parameters
// float* ref; // Pointer to reference point array
// float* query; // Pointer to query point array
// float* dist; // Pointer to distance array
// int* ind; // Pointer to index array
// int ref_nb = 4096; // Reference point number, max=65535
// int query_nb = 4096; // Query point number, max=65535
// int dim = 32; // Dimension of points
// int k = 20; // Nearest neighbors to consider
// int iterations = 100;
// int i;
// // Memory allocation
// ref = (float *) malloc(ref_nb * dim * sizeof(float));
// query = (float *) malloc(query_nb * dim * sizeof(float));
// dist = (float *) malloc(query_nb * k * sizeof(float));
//	ind    = (int *)   malloc(query_nb * k * sizeof(int));
// // Init
// srand(time(NULL));
// for (i=0 ; i<ref_nb * dim ; i++) ref[i] = (float)rand() / (float)RAND_MAX;
// for (i=0 ; i<query_nb * dim ; i++) query[i] = (float)rand() / (float)RAND_MAX;
// // Variables for duration evaluation
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// float elapsed_time;
// // Display informations
// printf("Number of reference points : %6d\n", ref_nb );
// printf("Number of query points : %6d\n", query_nb);
// printf("Dimension of points : %4d\n", dim );
// printf("Number of neighbors to consider : %4d\n", k );
// printf("Processing kNN search :" );
// // Call kNN search CUDA
// cudaEventRecord(start, 0);
// for (i=0; i<iterations; i++)
// knn(ref, ref_nb, query, query_nb, dim, k, dist, ind);
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&elapsed_time, start, stop);
// printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time/1000, iterations, elapsed_time/(iterations*1000));
// // Destroy cuda event object and free memory
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
// free(ind);
// free(dist);
// free(query);
// free(ref);
//}
#endif
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// Functions for deep learning calculations
#include "Function_hip.cuh"
hipError_t cudaStatus;
void Check(int* output_shape, float* dev_out)
{
int size_result = 1;
for (int i = 0; i < 4; i++) size_result *= output_shape[i];
cout << "size: " << size_result << endl;
float* host_result = new float[size_result];
memset(host_result, 0, size_result * sizeof(float));
hipMemcpy(host_result, dev_out, size_result * sizeof(float), hipMemcpyDeviceToHost);
float sum = 0;
for (int i = 0; i < size_result; i++) sum += host_result[i];
cout << "average of output: " << sum / (float)size_result << endl;
int N = output_shape[0]; cout << "N=" << N << endl;
int C = output_shape[1]; cout << "C=" << C << endl;
int H = output_shape[2]; cout << "H=" << H << endl;
int W = output_shape[3]; cout << "W=" << W << endl;
float sum1 = 0;
float tmp1;
for (int l = 0; l < W; l++) {
tmp1 = host_result[0 * (C*H*W) + 0 * (H*W) + 0 * (W)+l];
cout << "value of output[0,0,0,:]: " << tmp1 << endl;
sum1 += tmp1;
}
cout << "average of output[0,0,0,:]: " << sum1 / (float)W << endl;
delete[] host_result;
}
//index change
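// Decompose a flat thread id into multi-dimensional indices (row-major order).
// Example: with (ni, nj) = (2, 3), tid = 4 maps to (i, j) = (1, 1), since 4/3 = 1 and 4%3 = 1.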
__device__ __host__ void idx2d(int tid,
int ni, int nj,
int& i, int& j) {
i = tid / nj;
j = tid % nj;
}
__device__ __host__ void idx4d(int tid,
int ni, int nj, int nk, int nl,
int& i, int& j, int& k, int& l) {
i = tid / (nj*nk*nl);
tid = tid - (i*(nj*nk*nl));
j = tid / (nk*nl);
tid = tid - (j*(nk*nl));
k = tid / (nl);
l = tid % (nl);
}
__device__ __host__ void idx6d(int tid,
int ni, int nj, int nk, int nl, int nm, int nn,
int& i, int& j, int& k, int& l, int& m, int& n) {
i = tid / (nj*nk*nl*nm*nn);
tid = tid - (i*(nj*nk*nl*nm*nn));
j = tid / (nk*nl*nm*nn);
tid = tid - (j*(nk*nl*nm*nn));
k = tid / (nl*nm*nn);
tid = tid - (k*(nl*nm*nn));
l = tid / (nm*nn);
tid = tid - (l*(nm*nn));
m = tid / (nn);
n = tid % (nn);
}
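// Naive CPU reference: C[r x c] = A[r x n] * B[n x c], all row-major.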
void Dot(float* C, float* A, float* B, const int r, const int c, const int n) {
float temp;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
temp = 0.0;
for (int k = 0; k < n; k++) {
temp += A[i*n + k] * B[k*c + j];
}
C[i*c + j] = temp;
}
}
}
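// GPU matmul with a grid-stride loop: each thread owns one output cell (i, j),
// accumulates over k, then strides by gridDim.x*blockDim.x to the next cell.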
__global__ void Kernel_Dot(float* C, float* A, float* B,
const int r, const int c, const int n) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = r*c;
int i, j;
float temp, A_val, B_val;
while (tid < N)
{
temp = 0.0;
A_val = 0.0;
B_val = 0.0;
idx2d(tid, r, c, i, j);
for (int k = 0; k < n; k++) {
A_val = A[i*n + k];
B_val = B[k*c + j];
temp += A_val*B_val;
}
C[i*c + j] = temp;
tid += gridDim.x*blockDim.x;
}
}
void Dot_gpu(float* dev_C, float* dev_A, float* dev_B,
const int r, const int c, const int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Dot << < dimGrid, dimBlock >> > (dev_C, dev_A, dev_B, r, c, n);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
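// Minimal host-side usage sketch (a hypothetical example; assumes r, c, n and
// row-major host buffers A[r*n], B[n*c], C[r*c] already exist):
//   float *dA, *dB, *dC;
//   hipMalloc(&dA, r*n*sizeof(float)); hipMalloc(&dB, n*c*sizeof(float)); hipMalloc(&dC, r*c*sizeof(float));
//   hipMemcpy(dA, A, r*n*sizeof(float), hipMemcpyHostToDevice);
//   hipMemcpy(dB, B, n*c*sizeof(float), hipMemcpyHostToDevice);
//   Dot_gpu(dC, dA, dB, r, c, n);
//   hipMemcpy(C, dC, r*c*sizeof(float), hipMemcpyDeviceToHost);
// The coalescing variants below keep the same contract but expect A (variant 1)
// or B (variant 2) in transposed layout, so neighboring threads read neighboring addresses.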
__global__ void Kernel_Dot_coalescing1(float* C, float* A, float* B,
const int r, const int c, const int n) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = r*c;
int i, j;
float temp, A_val, B_val;
while (tid < N)
{
temp = 0.0;
A_val = 0.0;
B_val = 0.0;
idx2d(tid, r, c, i, j);
for (int k = 0; k < n; k++) {
A_val = A[k*r + i];
B_val = B[k*c + j];
temp += A_val*B_val;
}
C[i*c + j] = temp;
tid += gridDim.x*blockDim.x;
}
}
void Dot_coalescing1_gpu(float* dev_c, float* dev_a, float* dev_b,
const int r, const int c, const int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Dot_coalescing1 << < dimGrid, dimBlock >> > (dev_c, dev_a, dev_b, r, c, n);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Dot_coalescing2(float* C, float* A, float* B,
const int r, const int c, const int n) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = r*c;
int i, j;
float temp, A_val, B_val;
while (tid < N)
{
temp = 0.0;
A_val = 0.0;
B_val = 0.0;
idx2d(tid, r, c, i, j);
for (int k = 0; k < n; k++) {
A_val = A[i*n + k];
B_val = B[j*n + k];
temp += A_val*B_val;
}
C[i*c + j] = temp;
tid += gridDim.x*blockDim.x;
}
}
void Dot_coalescing2_gpu(float* dev_c, float* dev_a, float* dev_b,
const int r, const int c, const int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Dot_coalescing2 << < dimGrid, dimBlock >> > (dev_c, dev_a, dev_b, r, c, n);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
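// Reduction-based matmul: kernel 1 assigns one k-slice per thread, forms A[i,k]*B[k,j],
// and tree-reduces each block in shared memory; kernel 2 sums the per-block partials
// stored in 'reduction' (one value per block per output cell).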
__global__ void Kernel_Dot_reduction1(float* dev_a, float* dev_b,
const int r, const int c, const int n,
float* reduction) {
__shared__ float shared[BLOCK_SIZE];
unsigned int k = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int sharedIdx = threadIdx.x;
	// keep out-of-range threads alive: an early return before __syncthreads() can
	// deadlock a partial block, so they simply contribute 0 to the reduction
	bool active = (k < n);
	float A_val = 0;
	float B_val = 0;
	int m;
	for (int i = 0; i < r; i++) {
		for (int j = 0; j < c; j++) {
			A_val = active ? dev_a[i*n + k] : 0.0f;
			B_val = active ? dev_b[k*c + j] : 0.0f;
			shared[sharedIdx] = A_val*B_val;
__syncthreads();
m = blockDim.x / 2;
while (m != 0) {
if (sharedIdx < m) shared[sharedIdx] += shared[sharedIdx + m];
__syncthreads();
m /= 2;
}
if (sharedIdx == 0) reduction[i*(c*gridDim.x) + j*(gridDim.x) + blockIdx.x] = shared[0];
__syncthreads();
}
}
}
__global__ void Kernel_Dot_reduction2(float* dev_c, float* reduction, int r, const int c, const int n,
int size_block) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= r || j >= c) return;
float temp = 0;
for (int k = 0; k < size_block; k++) {
temp += reduction[i*(c*size_block) + j*(size_block)+k];
}
dev_c[i*c + j] = temp;
}
void Dot_reduction_gpu(float* dev_c, float* dev_a, float* dev_b,
const int r, const int c, const int n,
float* reduction)
{
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
Kernel_Dot_reduction1 << < dimGrid1, dimBlock1 >> > (dev_a, dev_b, r, c, n, reduction);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
int size_block = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimBlock2(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid2((r + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (c + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
Kernel_Dot_reduction2 << < dimGrid2, dimBlock2 >> > (dev_c, reduction, r, c, n, size_block);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
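// Atomic variant: each thread multiplies one k-slice and atomicAdd's it into C, so
// dev_C must be zero-initialized by the caller (e.g. via hipMemset) before launch.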
__global__ void Kernel_Dot_atomic(float* dev_c, float* dev_a, float* dev_b,
const int r, const int c, const int n) {
unsigned int k = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = n;
float temp, A_val, B_val;
while (k < N)
{
for (int i = 0; i < r; i++)
{
for (int j = 0; j < c; j++)
{
A_val = dev_a[i*n + k];
B_val = dev_b[k*c + j];
temp = A_val * B_val;
atomicAdd(&(dev_c[i*c + j]), temp);
}
}
k += gridDim.x*blockDim.x;
}
}
void Dot_atomic_gpu(float* dev_C, float* dev_A, float* dev_B,
const int r, const int c, const int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Dot_atomic << < dimGrid, dimBlock >> > (dev_C, dev_A, dev_B, r, c, n);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
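// Bias add: 'f' (forward) broadcasts B[c] onto every row of A[r x c];
// 'b' (backward) reduces B[r x c] over rows into A[c].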
void Sum(char txt, float* A, float* B, const int r, const int c) {
switch (txt)
{
case 'f':
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
A[i*c + j] += B[j];
}
}
break;
case 'b':
for (int j = 0; j < c; j++) {
A[j] = 0.0;
}
for (int j = 0; j < c; j++) {
for (int i = 0; i < r; i++) {
A[j] += B[i*c + j];
}
}
break;
default:
cout << "Error for 'txt' variable!" << endl;
break;
}
}
__global__ void Kernel_Sum_forward(float* dev_A, float* dev_B, const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r*c;
int i, j;
while (tid < N)
{
idx2d(tid, r, c, i, j);
dev_A[i*c + j] += dev_B[j];
tid += gridDim.x*blockDim.x;
}
}
__global__ void Kernel_Sum_backward(float* dev_A, float* dev_B, const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = c;
int j;
while (tid < N)
{
j = tid;
dev_A[j] = 0.0;
for (int i = 0; i < r; i++) {
dev_A[j] += dev_B[i*c + j];
}
tid += gridDim.x*blockDim.x;
}
}
//template <unsigned int blockSize>
//__device__ void warpReduce(volatile float* sdata, int tid)
//{
// if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
// if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
// if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
// if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
// if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
// if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
//}
//__global__ void Kernel_Sum_backward_opt(float* dev_sum, float* dev_B, const int r, const int c) {
// __shared__ float sdata[(BLOCK_SIZE_opt / 2)];
// unsigned int tid = threadIdx.x;
// unsigned int i = (blockDim.x * 2) * blockIdx.x + threadIdx.x;
// //if (i >= r) return;
// for (int j = 0; j < c; j++) {
// sdata[tid] = dev_B[i*c + j] + dev_B[(i + blockDim.x)*c + j];
// __syncthreads();
// if (blockDim.x >= 512) {
// if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
// }
// if (blockDim.x >= 256) {
// if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
// }
// if (blockDim.x >= 128) {
// if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
// }
// if (tid < 32) warpReduce<BLOCK_SIZE_opt / 2>(sdata, tid);
// if (tid == 0) dev_sum[blockIdx.x*c + j] = sdata[0];
// __syncthreads();
// }
//}
__global__ void Kernel_Sum_backward_opt_sum(float* dev_A, float* dev_sum, int r_sum, const int c) {
unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= c) return;
float temp = 0;
for (int i = 0; i < r_sum; i++) {
temp += dev_sum[i*c + j];
}
dev_A[j] = temp;
}
__global__ void Kernel_Sum_backward1(float* dev_B, float* dev_partial, const int r, const int c) {
__shared__ float cache[BLOCK_SIZE];
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int cacheIndex = threadIdx.x;
	// out-of-range threads must not return before __syncthreads(); they load 0 instead
	bool active = (i < r);
	for (int j = 0; j < c; j++) {
		cache[cacheIndex] = active ? dev_B[i*c + j] : 0.0f;
__syncthreads();
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIndex < k) cache[cacheIndex] += cache[cacheIndex + k];
__syncthreads();
k /= 2;
}
if (cacheIndex == 0) dev_partial[blockIdx.x*c + j] = cache[0];
__syncthreads();
}
}
__global__ void Kernel_Sum_backward2(float* dev_A, float* dev_partial, const int r, const int c,
int size_partial) {
unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= c) return;
int i;
float temp = 0;
for (i = 0; i < size_partial; i++) {
temp += dev_partial[i*c + j];
}
dev_A[j] = temp;
}
void Sum_gpu(char txt, float* dev_A, float* dev_B, const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
switch (txt)
{
case 'f':
Kernel_Sum_forward << < dimGrid, dimBlock >> > (dev_A, dev_B, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
break;
case 'b':
Kernel_Sum_backward << < dimGrid, dimBlock >> > (dev_A, dev_B, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
break;
default:
cout << "Error for 'txt' variable!" << endl;
break;
}
}
void Sum_gpu(char txt, float* dev_A, float* dev_B, const int r, const int c,
float* dev_sum)
{
if (txt != 'b')
cout << "(Sum_gpu) this function should be in backward" << endl;
	dim3 dimBlock(BLOCK_SIZE_opt / 2); //halve the number of threads
	dim3 dimGrid((r + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt);
	// note: the reduction kernel below is commented out, so dev_sum is never filled
	// and this overload does not currently produce a valid result
	//Kernel_Sum_backward_opt << < dimGrid, dimBlock >> > (dev_sum, dev_B, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
int r_sum = (r + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt;
dim3 dimBlock_sum(BLOCK_SIZE_opt);
dim3 dimGrid_sum((c + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt);
Kernel_Sum_backward_opt_sum << < dimGrid_sum, dimBlock_sum >> > (dev_A, dev_sum, r_sum, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void Sum_gpu1(char txt, float* dev_A, float* dev_B, const int r, const int c,
float* dev_partial, int size_partial) {
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((r + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Sum_gpu'!" << endl;
}
Kernel_Sum_backward1 << < dimGrid2, dimBlock2 >> > (dev_B, dev_partial, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
dim3 dimBlock3(BLOCK_SIZE);
dim3 dimGrid3((c + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid3.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Sum_gpu'!" << endl;
}
Kernel_Sum_backward2 << < dimGrid3, dimBlock3 >> > (dev_A, dev_partial, r, c, size_partial);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
/*loss function*/
float MSE(float** x1, float** x2, const int r, const int c) {
float temp = 0.0;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
temp += pow(x1[i][j] - x2[i][j], 2);
}
}
temp /= 2.0*r;
return temp;
}
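// Numerically stable softmax per row: y_ij = exp(x_ij - m_i) / sum_j exp(x_ij - m_i),
// where m_i is the row maximum; subtracting it first keeps expf from overflowing.
// (Here the max is seeded with 0, which only matters if every entry is negative.)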
void Softmax(float* x, const int r, const int c) {
float temp1, temp2;
for (int i = 0; i < r; i++) {
temp1 = 0.;
temp2 = 0.;
for (int j = 0; j < c; j++)
{
temp1 = max(x[i*c + j], temp1);
}
for (int j = 0; j < c; j++)
{
x[i*c + j] = expf(x[i*c + j] - temp1);
temp2 += x[i*c + j];
}
for (int j = 0; j < c; j++) x[i*c + j] /= temp2;
}
}
void Softmax_seg(float* x, const int size_category, const int size_spatial_feature_map)
{
int c = size_category;
int size = size_spatial_feature_map;
float temp1, temp2;
for (int i = 0; i < size; i++) {
temp1 = 0.;
temp2 = 0.;
for (int j = 0; j < c; j++)
{
temp1 = max(x[j*size + i], temp1);
}
for (int j = 0; j < c; j++)
{
x[j*size + i] = expf(x[j*size + i] - temp1);
temp2 += x[j*size + i];
}
for (int j = 0; j < c; j++) x[j*size + i] /= temp2;
}
}
__global__ void Kernel_Softmax(float* dev_x, const int r, const int c) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= r) return;
float temp1 = 0., temp2 = 0.;
for (int j = 0; j < c; j++) temp1 = max(dev_x[i*c + j], temp1);
for (int j = 0; j < c; j++) {
dev_x[i*c + j] = expf(dev_x[i*c + j] - temp1);
temp2 += dev_x[i*c + j];
}
for (int j = 0; j < c; j++) dev_x[i*c + j] /= temp2;
}
void Softmax_gpu(float* dev_x, const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((r + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax_gpu'!" << endl;
}
Kernel_Softmax << < dimGrid, dimBlock >> > (dev_x, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Softmax_seg(float* dev_x, const int c, const int size) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
int N = size;
float temp = 0.;
	while (i < N)
	{
		temp = 0.0;	// reset per column: the previous grid-stride iteration leaves the column sum in temp
		for (int j = 0; j < c; j++)
			temp = max(dev_x[j*size + i], temp);
for (int j = 0; j < c; j++)
dev_x[j*size + i] = expf(dev_x[j*size + i] - temp);
temp = 0.0;
for (int j = 0; j < c; j++)
temp += dev_x[j*size + i];
for (int j = 0; j < c; j++)
dev_x[j*size + i] /= temp;
i += gridDim.x*blockDim.x;
}
}
void Softmax_seg_gpu(float* dev_x, const int size_category, const int size_spatial_feature_map) {
int size = size_spatial_feature_map;
int c = size_category;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Softmax_seg << < dimGrid, dimBlock >> > (dev_x, c, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Softmax4d(float* dev_x, int N, int C, int H, int W) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int Max = H*W;
if (tid >= Max) return;
int i, j;
idx2d(tid, H, W, i, j);
float temp_max = 0;
for (int n = 0; n < C; n++) temp_max = max(dev_x[0 * (C*H*W) + n*(H*W) + i*(W)+j], temp_max);
float temp_sum = 0;
for (int n = 0; n < C; n++) temp_sum += expf(dev_x[0 * (C*H*W) + n*(H*W) + i*(W)+j] - temp_max);
for (int n = 0; n < C; n++) dev_x[0 * (C*H*W) + n*(H*W) + i*(W)+j] = expf(dev_x[0 * (C*H*W) + n*(H*W) + i*(W)+j] - temp_max) / temp_sum;
}
void Softmax4d_gpu(float* dev_x, int N, int C, int H, int W) {
	if (N != 1) // the batch size 'N' must be 1
{
cout << "the batch size 'N' must be 1! N=[" << N << "] at Softmax4d_gpu" << endl;
}
int size = H*W;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax4d_gpu'!" << endl;
}
Kernel_Softmax4d << < dimGrid, dimBlock >> > (dev_x, N, C, H, W);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Softmax_shared1(float* dev_x, const int XN, const int DN, float* dev_partialX4d, int size_partialX4d) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
	unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
	// guard the load instead of returning: an early return before __syncthreads() can deadlock a partial block
	if (tid < DN) cache[cacheIdx] = dev_x[tid];
	__syncthreads();
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] = max(cache[cacheIdx], cache[cacheIdx + k]);
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_partialX4d[blockIdx.x] = cache[0];
__syncthreads();
}
__global__ void Softmax_shared2(float* dev_x, const int XN, const int DN, float* dev_partialX4d, int size_partialX4d) {
	unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
	// keep every thread alive: an early return before __syncthreads() can deadlock a partial block
	bool active = (tid < DN);
	if (active) {
		// the global max is the maximum over the per-block maxima from Softmax_shared1, not their sum
		float max_val = dev_partialX4d[0];
		for (int i = 1; i < size_partialX4d; i++)
		{
			max_val = max(max_val, dev_partialX4d[i]);
		}
		dev_x[tid] = expf(dev_x[tid] - max_val);
	}
	// note: dev_partialX4d is reused below for the partial sums; this assumes all blocks
	// finish reading the maxima before any block writes, which a separate buffer would guarantee
	unsigned int cacheIdx = threadIdx.x;
	__shared__ float cache[BLOCK_SIZE];
	cache[cacheIdx] = active ? dev_x[tid] : 0.0f;
	__syncthreads();
	int k = blockDim.x / 2;
	while (k != 0) {
		if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
		__syncthreads();
		k /= 2;
	}
	if (cacheIdx == 0) dev_partialX4d[blockIdx.x] = cache[0];
	__syncthreads();
}
__global__ void Softmax_shared3(float* dev_x, const int XN, const int DN, float* dev_partialX4d, int size_partialX4d) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= DN) return;
float sum = 0;
for (int i = 0; i < size_partialX4d; i++)
{
sum += dev_partialX4d[i];
}
dev_x[tid] /= sum;
}
void Softmax_gpu_shared(float* dev_x, const int XN, const int DN, float* dev_partialX4d, int size_partialX4d) {
if (XN != 1) // the batch size 'XN' must be 1
{
cout << "the batch size 'XN' must be 1! XN=[" << XN << ']' << endl;
}
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid1.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax_gpu_shared1'!" << endl;
}
Softmax_shared1 << < dimGrid1, dimBlock1 >> > (dev_x, XN, DN, dev_partialX4d, size_partialX4d);
gpuErrchk(hipGetLastError());
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax_gpu_shared2'!" << endl;
}
Softmax_shared2 << < dimGrid2, dimBlock2 >> > (dev_x, XN, DN, dev_partialX4d, size_partialX4d);
gpuErrchk(hipGetLastError());
dim3 dimBlock3(BLOCK_SIZE);
dim3 dimGrid3((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid3.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax_gpu_shared3'!" << endl;
}
Softmax_shared3 << < dimGrid3, dimBlock3 >> > (dev_x, XN, DN, dev_partialX4d, size_partialX4d);
gpuErrchk(hipGetLastError());
}
float CEE_seg(float* x, int* t, const int size_category, const int size_spatial_feature_map)
{
int c = size_category;
int size = size_spatial_feature_map;
float temp = 0;
for (int j = 0; j < size; j++) {
for (int i = 0; i < c; i++) {
if (i == t[j]) {
temp += log(x[i*size + j] + 1e-7);
continue;
}
}
}
temp /= -size;
return temp;
}
__global__ void Kernel_CEE_seg(float* dev_x, int* dev_t, float* dev_loss, const int c, const int size)
{
int j = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
float temp = 0;
while (j < N)
{
for (int i = 0; i < c; i++) {
if (i == dev_t[j]) {
temp = logf(dev_x[i*size + j] + 1e-7);
atomicAdd(dev_loss, temp);
continue;
}
}
j += gridDim.x*blockDim.x;
}
}
float CEE_seg_gpu(float* dev_x, int* dev_t, float* dev_loss,
const int size_category, const int size_spatial_feature_map) {
int c = size_category;
int size = size_spatial_feature_map;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
hipMemset(dev_loss, 0, sizeof(float));
Kernel_CEE_seg << < dimGrid, dimBlock >> > (dev_x, dev_t, dev_loss, c, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
float loss = 0;
hipMemcpy(&loss, dev_loss, sizeof(float), hipMemcpyDeviceToHost);
loss /= -size;
return loss;
}
/*padding and stride*/
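// Zero padding copies the input into the interior of a (XH+2*pad) x (XW+2*pad)
// buffer; the border of x_pad is assumed to be zeroed by the caller beforehand.
// The backward pass crops the interior back out.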
void Padding_forward(char txt, float* x_pad, float* x, const int pad,
const int XN, const int XC, const int XH, const int XW) {
int idx, idx_pad;
int XH_pad = XH + 2 * pad;
int XW_pad = XW + 2 * pad;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < XH; k++) {
for (int l = 0; l < XW; l++) {
idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
idx_pad = i*(XC*XH_pad*XW_pad) + j*(XH_pad*XW_pad) + (k + pad)*(XW_pad)+(l + pad);
x_pad[idx_pad] = x[idx];
}
}
}
}
}
void Padding_backward(char txt, float* dx_pad, float* dx, const int pad,
const int XN, const int XC, const int XH, const int XW,
const int dXH, const int dXW)
{
int i, j, k, l;
int idx_dx, idx_dx_pad;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < XH; k++) {
for (l = 0; l < XW; l++) {
idx_dx_pad = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
idx_dx = i*(XC*dXH*dXW) + j*(dXH*dXW) + (k + pad)*(dXW)+(l + pad);
dx_pad[idx_dx_pad] = dx[idx_dx];
}
}
}
}
}
__global__ void Kernel_Padding_forward(float* dev_x_pad, float*dev_X, const int pad,
const int XN, const int XC, const int XH, const int XW) {
int i, j, k, l;
int XH_pad = XH + 2 * pad;
int XW_pad = XW + 2 * pad;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int idx_pad, idx;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
idx_pad = i*(XC*XH_pad*XW_pad) + j*(XH_pad*XW_pad) + (k + pad)*(XW_pad)+(l + pad);
idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
dev_x_pad[idx_pad] = dev_X[idx];
tid += gridDim.x*blockDim.x;
}
}
void Padding_forward_gpu(float* dev_x_pad, float* dev_X, const int pad,
const int XN, const int XC, const int XH, const int XW) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Padding_forward << < dimGrid, dimBlock >> > (dev_x_pad, dev_X, pad, XN, XC, XH, XW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Padding_backward(float* dev_dx_pad, float*dev_dx, const int pad,
const int XN, const int XC, const int XH, const int XW,
const int dXH, const int dXW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int i, j, k, l, idx_dx_pad, idx_dx;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
idx_dx_pad = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
idx_dx = i*(XC*dXH*dXW) + j*(dXH*dXW) + (k + pad)*(dXW)+(l + pad);
dev_dx_pad[idx_dx_pad] = dev_dx[idx_dx];
tid += gridDim.x*blockDim.x;
}
}
void Padding_backward_gpu(float* dev_dx_pad, float*dev_dx, const int pad,
const int XN, const int XC, const int XH, const int XW,
const int dxH, const int dxW) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Padding_backward << < dimGrid, dimBlock >> > (dev_dx_pad, dev_dx, pad, XN, XC, XH, XW, dxH, dxW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void Padding_transpose_forward(float* x_pad, float* x, int stride, int pad,
int XN, int XC, int XH, int XW, int XH_pad, int XW_pad)
{
int idx_pad, idx;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < XH; k++) {
for (int l = 0; l < XW; l++) {
idx_pad = i*(XC*XH_pad*XW_pad) + j*(XH_pad*XW_pad) + (stride*k + pad)*(XW_pad)+(stride*l + pad);
idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
x_pad[idx_pad] = x[idx];
}
}
}
}
}
void Padding_transpose_backward(float* dx_pad, float* dx, int stride, int pad,
int XN, int XC, int XH, int XW, int dXH, int dXW)
{
int idx_dx_pad, idx_dx;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < XH; k++) {
for (int l = 0; l < XW; l++) {
idx_dx_pad = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
idx_dx = i*(XC*dXH*dXW) + j*(dXH*dXW) + (stride*k + pad)*(dXW)+(stride*l + pad);
dx_pad[idx_dx_pad] = dx[idx_dx];
}
}
}
}
}
__global__ void Kernel_Padding_transpose_forward(float* dev_x_pad, float* dev_x, int stride, int pad,
int XN, int XC, int XH, int XW, int XH_pad, int XW_pad) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int i, j, k, l, idx_x_pad, idx_x;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
idx_x_pad = i*(XC*XH_pad*XW_pad) + j*(XH_pad*XW_pad) + (stride*k + pad)*(XW_pad)+(stride*l + pad);
idx_x = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
dev_x_pad[idx_x_pad] = dev_x[idx_x];
tid += gridDim.x*blockDim.x;
}
}
void Padding_transpose_forward_gpu(float* dev_x_pad, float* dev_x, int stride, int pad,
int XN, int XC, int XH, int XW, int XH_pad, int XW_pad) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Padding_transpose_forward << < dimGrid, dimBlock >> > (dev_x_pad, dev_x, stride, pad, XN, XC, XH, XW, XH_pad, XW_pad);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Padding_transpose_backward(float* dev_dx_pad, float* dev_dx, int stride, int pad,
int XN, int XC, int XH, int XW, int dXH, int dXW)
{
int i, j, k, l;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
if (tid >= N) return;
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
int idx_dx_pad = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
int idx_dx = i*(XC*dXH*dXW) + j*(dXH*dXW) + (stride*k + pad)*(dXW)+(stride*l + pad);
dev_dx_pad[idx_dx_pad] = dev_dx[idx_dx];
}
void Padding_transpose_backward_gpu(float* dev_dx_pad, float* dev_dx, int stride, int pad,
int XN, int XC, int XH, int XW, int dXH, int dXW)
{
int size = XN*XC*XH*XW;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
		cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Padding_transpose_backward_gpu'!" << endl;
}
Kernel_Padding_transpose_backward << < dimGrid, dimBlock >> > (dev_dx_pad, dev_dx, stride, pad, XN, XC, XH, XW, dXH, dXW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
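// im2col ("Stride" forward): every FH x FW receptive field of the padded input is
// unrolled into a column so convolution turns into a matrix multiply.
// The output spatial size of the padded input follows OH = (XH - FH)/stride + 1 (likewise for OW).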
void Stride_forward(float* col, float* img, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
int i, j, k, l, m, n, a, b;
int y_max, x_max;
int idx_col, idx_img;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < FH; k++) {
for (l = 0; l < FW; l++) {
for (m = 0; m < OH; m++) {
for (n = 0; n < OW; n++) {
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
col[idx_col] = 0;
}
}
}
}
}
}
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < FH; k++) {
y_max = k + stride*OH;
for (l = 0; l < FW; l++) {
x_max = l + stride*OW;
for (a = k, m = 0; a < y_max; a = a + stride, m++) {
for (b = l, n = 0; b < x_max; b = b + stride, n++) {
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
col[idx_col] = img[idx_img];
}
}
}
}
}
}
}
__global__ void Kernel_Stride_forward(float* dev_col, float* dev_img, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*FH*FW*OH*OW;
int i, j, k, l, m, n, a, b;
int idx_col;
int idx_img;
while (tid < N)
{
idx6d(tid, XN, XC, FH, FW, OH, OW, i, j, k, l, m, n);
a = k + m*stride;
b = l + n*stride;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
		dev_col[idx_col] = dev_img[idx_img];	// every col element is covered, so no separate zero-fill pass is needed
tid += gridDim.x * blockDim.x;
}
}
void Stride_forward_gpu(float* dev_col, float* dev_img, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Stride_forward << < dimGrid, dimBlock >> > (dev_col, dev_img, stride, XN, XC, FH, FW, OH, OW, XH, XW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void Stride_backward(float* img, float* col, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
int i, j, k, l, m, n;
int y_max, x_max;
int idx_img, idx_col;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < XH; k++) {
for (l = 0; l < XW; l++) {
idx_img = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
img[idx_img] = 0;
}
}
}
}
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < FH; k++) {
y_max = k + stride*OH;
for (l = 0; l < FW; l++) {
x_max = l + stride*OW;
for (int a = k, m = 0; a < y_max; a = a + stride, m++) {
for (int b = l, n = 0; b < x_max; b = b + stride, n++) {
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
img[idx_img] += col[idx_col];
}
}
}
}
}
}
}
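// col2im on the GPU is written gather-style: instead of scattering additions like the
// CPU version above, each image pixel (a, b) walks every filter offset (k, l) that
// could have produced it (k + stride*m == a, l + stride*n == b) and sums those
// column entries, so no atomics are needed.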
__global__ void Kernel_Stride_backward(float* dev_img, float* dev_col, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int i, j, a, b, idx_img, idx_col;
int k, l, m, n, temp;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, a, b);
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
dev_img[idx_img] = 0;
for (k = 0; k < FH && k <= a; k++)
{
m = (a - k) / stride;
temp = k + stride*m;
if (temp != a || m >= OH)
continue;
for (l = 0; l < FW && l <= b; l++)
{
n = (b - l) / stride;
temp = l + stride*n;
if (temp != b || n >= OW)
continue;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
dev_img[idx_img] += dev_col[idx_col];
}
}
tid += gridDim.x*blockDim.x;
}
}
void Stride_backward_gpu(float* dev_img, float* dev_col, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Stride_backward << < dimGrid, dimBlock >> > (dev_img, dev_col, stride, XN, XC, FH, FW, OH, OW, XH, XW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
/*reshape and transpose*/
void Flatten6d(float* flattenX, float****** X,
const int d1, const int d2, const int d3, const int d4, const int d5, const int d6) {
for (int i = 0; i < d1; i++) {
for (int j = 0; j < d2; j++) {
for (int k = 0; k < d3; k++) {
for (int l = 0; l < d4; l++) {
for (int m = 0; m < d5; m++) {
for (int n = 0; n < d6; n++) {
flattenX[i*(d2*d3*d4*d5*d6) + j*(d3*d4*d5*d6) + k*(d4*d5*d6) + l*(d5*d6) + m*(d6)+n]
= X[i][j][k][l][m][n];
}
}
}
}
}
}
}
void Flatten4d(float* flattenX, float**** X,
const int d1, const int d2, const int d3, const int d4) {
int i, j, k, l;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
for (k = 0; k < d3; k++) {
for (l = 0; l < d4; l++) {
flattenX[i*(d2*d3*d4) + j*(d3*d4) + k*(d4)+l] = X[i][j][k][l];
}
}
}
}
}
void Flatten2d(float* flattenX, float** X,
const int d1, const int d2) {
int i, j;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
flattenX[i*(d2)+j] = X[i][j];
}
}
}
void Flatten2d_int(int* flattenX, int** X,
const int d1, const int d2) {
int i, j;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
flattenX[i*(d2)+j] = X[i][j];
}
}
}
void Reshape6to2(float** reshapeArray, float****** array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
							reshapeArray[i*(OH*OW) + j*(OW)+k][l*(FH*FW) + m*(FW)+n] = array[i][j][k][l][m][n];
}
}
}
}
}
}
}
void Reshape6to2_gpu(float* dev_reshapeArray, float* dev_array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW,
float* host_reshapeArray, int size_reshapeArray) {
	// stub: the kernel call below is commented out, so this function currently does nothing
	//Kernel_Reshape6to2 << < 1, 1 >> > (dev_reshapeArray, dev_array, XN, OH, OW, XC, FH, FW);
	//hipDeviceSynchronize();
	//hipMemcpy(host_reshapeArray, dev_reshapeArray, size_reshapeArray * sizeof(float), hipMemcpyDeviceToHost);
}
void Reshape6to2_poolingForward(float** reshapeArray, float****** array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
reshapeArray[i*(OH*OW*XC) + j*(OW*XC) + k*(XC)+l][m*(FW)+n] = array[i][j][k][l][m][n];
}
}
}
}
}
}
}
void Reshape4to2_forward(float** reshapeArray, float**** array,
const int FN, const int FC, const int FH, const int FW) {
int i, j, k, l;
for (i = 0; i < FN; i++) {
for (j = 0; j < FC; j++) {
for (k = 0; k < FH; k++) {
for (l = 0; l < FW; l++) {
reshapeArray[i][j*(FH*FW) + k*(FW)+l] = array[i][j][k][l];
}
}
}
}
}
void Reshape4to2_backward(float** reshapeArray, float**** array,
const int XN, const int OH, const int OW, const int FN) {
int i, j, k, l;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < FN; l++) {
reshapeArray[i*(OH*OW) + j*(OW)+k][l] = array[i][j][k][l];
}
}
}
}
}
void Reshape4to2(char txt, float** reshapeArray, float**** array,
const int d1, const int d2, const int d3, const int d4) {
int FN, FC, FH, FW, XN, OH, OW;
int i, j, k, l;
switch (txt)
{
case 'f':
FN = d1;
FC = d2;
FH = d3;
FW = d4;
Reshape4to2_forward(reshapeArray, array, FN, FC, FH, FW);
break;
case 'b':
XN = d1;
OH = d2;
OW = d3;
FN = d4;
Reshape4to2_backward(reshapeArray, array, XN, OH, OW, FN);
break;
default:
cout << "Error for 'txt' variable in Reshape4to2(cpu)!" << endl;
break;
}
}
void Reshape2to4_forward(float**** reshapeArray, float** array,
const int XN, const int OH, const int OW, const int FN) {
int i, j, k, l;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < FN; l++) {
reshapeArray[i][j][k][l] = array[i*(OH*OW) + j*(OW)+k][l];
}
}
}
}
}
void Reshape2to4_backward(float**** reshapeArray, float** array,
const int XN, const int XC, const int XH, const int XW) {
int i, j, k, l;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < XH; k++) {
for (l = 0; l < XW; l++) {
reshapeArray[i][j][k][l] = array[i][j*(XH*XW) + k*(XW)+l];
}
}
}
}
}
void Reshape2to4(char txt, float**** reshapeArray, float** array,
const int d1, const int d2, const int d3, const int d4) {
int XN, OH, OW, FN, XC, XH, XW;
int i, j, k, l;
switch (txt)
{
case 'f':
XN = d1;
OH = d2;
OW = d3;
FN = d4;
Reshape2to4_forward(reshapeArray, array, XN, OH, OW, FN);
break;
case 'b':
XN = d1;
XC = d2;
XH = d3;
XW = d4;
Reshape2to4_backward(reshapeArray, array, XN, XC, XH, XW);
break;
default:
cout << "Error for 'txt' variable in Reshape2to4(cpu)!" << endl;
break;
}
}
void Reshape2to6(float****** reshapeArray, float** array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
							reshapeArray[i][j][k][l][m][n] = array[i*(OH*OW) + j*(OW)+k][l*(FH*FW) + m*(FW)+n];
}
}
}
}
}
}
}
void Reshape1to6(float****** reshapeArray, float* array,
const int d1, const int d2, const int d3, const int d4, const int d5, const int d6) {
int i, j, k, l, m, n;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
for (k = 0; k < d3; k++) {
for (l = 0; l < d4; l++) {
for (m = 0; m < d5; m++) {
for (n = 0; n < d6; n++) {
reshapeArray[i][j][k][l][m][n] = array[i*(d2*d3*d4*d5*d6) + j*(d3*d4*d5*d6) + k*(d4*d5*d6) + l*(d5*d6) + m*(d6)+n];
}
}
}
}
}
}
}
void Reshape1to4(float**** reshapeArray, float* array,
const int XN, const int OH, const int OW, const int XC) {
int i, j, k, l;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
reshapeArray[i][j][k][l] = array[i*(OH*OW*XC) + j*(OW*XC) + k*(XC)+l];
}
}
}
}
}
void Reshape1to2(float** reshapeArray, float* array,
const int d1, const int d2) {
int i, j;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
reshapeArray[i][j] = array[i*(d2)+j];
}
}
}
void Transpose2d(float* array_transpose, float* array, const int r, const int c) {
int i, j;
for (i = 0; i < r; i++) {
for (j = 0; j < c; j++) {
array_transpose[j*r + i] = array[i*c + j];
}
}
}
__global__ void Kernel_Transpose2d(float* dev_transposeArray, float* dev_array,
const int r, const int c) {
//unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
//int N = r*c;
//int i, j, idx_transposeArray, idx_array;
//while (tid < N)
//{
// idx2d(tid, r, c, i, j);
// idx_array = i*c + j;
// idx_transposeArray = j*r + i;
// dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
// tid += gridDim.x * blockDim.x;
//}
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= r || j >= c) return;
int idx_transposeArray, idx_array;
idx_array = i*c + j;
idx_transposeArray = j*r + i;
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
}
void Transpose2d_gpu(float* dev_transposeArray, float* dev_array, const int r, const int c) {
//dim3 dimBlock(BLOCK_SIZE);
//dim3 dimGrid(GRID_SIZE);
//Kernel_Transpose2d << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, r, c);
//hipDeviceSynchronize();
//gpuErrchk(hipGetLastError());
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((r + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (c + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
Kernel_Transpose2d << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void Transpose4d_forward(float* array_transpose, float* array,
const int XN, const int OH, const int OW, const int FN) {
int i, j, k, l;
int idx_transpose, idx;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < FN; l++) {
idx_transpose = i*(FN*OH*OW) + l*(OH*OW) + j*(OW)+k;
idx = i*(OH*OW*FN) + j*(OW*FN) + k*(FN)+l;
array_transpose[idx_transpose] = array[idx];
}
}
}
}
}
__global__ void Kernel_Transpose4d_forward(float* dev_transposeArray, float* dev_array,
const int XN, const int OH, const int OW, const int FN) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*OH*OW*FN;
int i, j, k, l;
int idx_transposeArray, idx_array;
while (tid < N)
{
idx4d(tid, XN, OH, OW, FN, i, j, k, l);
idx_transposeArray = i*(FN*OH*OW) + l*(OH*OW) + j*(OW)+k;
idx_array = i*(OH*OW*FN) + j*(OW*FN) + k*(FN)+l;
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
tid += gridDim.x*blockDim.x;
}
}
void Transpose4d_backward(float* array_transpose, float* array,
const int XN, const int XC, const int OH, const int OW) {
int i, j, k, l;
int idx_transpose, idx;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < OH; k++) {
for (l = 0; l < OW; l++) {
idx_transpose = i*(OH*OW*XC) + k*(OW*XC) + l*(XC)+j;
idx = i*(XC*OH*OW) + j*(OH*OW) + k*(OW)+l;
array_transpose[idx_transpose] = array[idx];
}
}
}
}
}
__global__ void Kernel_Transpose4d_backward(float* dev_transposeArray, float* dev_array,
const int XN, const int FN, const int OH, const int OW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*FN*OH*OW;
int i, j, k, l;
int idx_transposeArray, idx_array;
while (tid < N)
{
idx4d(tid, XN, FN, OH, OW, i, j, k, l);
idx_transposeArray = i*(OH*OW*FN) + k*(OW*FN) + l*(FN)+j;
idx_array = i*(FN*OH*OW) + j*(OH*OW) + k*(OW)+l;
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
tid += gridDim.x*blockDim.x;
}
}
void Transpose4d(char txt, float* array_transpose, float* array,
const int d1, const int d2, const int d3, const int d4) {
int XN, OH, OW, FN, XC;
switch (txt)
{
case 'f':
XN = d1;
OH = d2;
OW = d3;
FN = d4;
Transpose4d_forward(array_transpose, array, XN, OH, OW, FN);
break;
case 'b':
XN = d1;
XC = d2;
OH = d3;
OW = d4;
Transpose4d_backward(array_transpose, array, XN, XC, OH, OW);
break;
default:
cout << "Error for 'txt' variable in Transpose4d(cpu)!" << endl;
break;
}
}
void Transpose4d_gpu(char txt, float* dev_transposeArray, float* dev_array,
const int d1, const int d2, const int d3, const int d4) {
int XN, OH, OW, FN;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
switch (txt)
{
case 'f':
XN = d1;
OH = d2;
OW = d3;
FN = d4;
if (XN == 1) {
Transpose2d_gpu(dev_transposeArray, dev_array, OH*OW, FN);
}
else {
Kernel_Transpose4d_forward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, OH, OW, FN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
//Kernel_Transpose4d_forward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, OH, OW, FN);
//hipDeviceSynchronize();
//gpuErrchk(hipGetLastError());
break;
case 'b':
XN = d1;
FN = d2;
OH = d3;
OW = d4;
if (XN == 1) {
Transpose2d_gpu(dev_transposeArray, dev_array, FN, OH*OW);
}
else {
Kernel_Transpose4d_backward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, FN, OH, OW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
//Kernel_Transpose4d_backward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, FN, OH, OW);
//hipDeviceSynchronize();
//gpuErrchk(hipGetLastError());
break;
default:
cout << "Error for 'txt' variable in Transpose4d(gpu)!" << endl;
break;
}
}
void Transpose6d_forward(float* array_transpose, float* array,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW) {
int i, j, k, l, m, n;
int idx_transpose;
int idx;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < FH; k++) {
for (l = 0; l < FW; l++) {
for (m = 0; m < OH; m++) {
for (n = 0; n < OW; n++) {
idx_transpose = i*(OH*OW*XC*FH*FW) + m*(OW*XC*FH*FW) + n*(XC*FH*FW) + j*(FH*FW) + k*(FW)+l;
idx = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
array_transpose[idx_transpose] = array[idx];
}
}
}
}
}
}
}
__global__ void Kernel_Transpose6d_forward(float* dev_transposeArray, float* dev_array,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*FH*FW*OH*OW;
int i, j, k, l, m, n;
int idx_transposeArray;
int idx_array;
while (tid < N)
{
idx6d(tid, XN, XC, FH, FW, OH, OW, i, j, k, l, m, n);
idx_transposeArray = i*(OH*OW*XC*FH*FW) + m*(OW*XC*FH*FW) + n*(XC*FH*FW) + j*(FH*FW) + k*(FW)+l;
idx_array = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
tid += gridDim.x *blockDim.x;
}
}
void Transpose6d_backward(float* array_transpose, float* array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
int idx_transpose;
int idx;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
idx_transpose = i*(XC*FH*FW*OH*OW) + l*(FH*FW*OH*OW) + m*(FW*OH*OW) + n*(OH*OW) + j*(OW)+k;
							idx = i*(OH*OW*XC*FH*FW) + j*(OW*XC*FH*FW) + k*(XC*FH*FW) + l*(FH*FW) + m*(FW)+n;
array_transpose[idx_transpose] = array[idx];
}
}
}
}
}
}
}
__global__ void Kernel_Transpose6d_backward(float* dev_transposeArray, float* dev_array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*OH*OW*XC*FH*FW;
int i, j, k, l, m, n;
int idx_transposeArray;
int idx_array;
while (tid < N)
{
idx6d(tid, XN, OH, OW, XC, FH, FW, i, j, k, l, m, n);
idx_transposeArray = i*(XC*FH*FW*OH*OW) + l*(FH*FW*OH*OW) + m*(FW*OH*OW) + n*(OH*OW) + j*(OW)+k;
		idx_array = i*(OH*OW*XC*FH*FW) + j*(OW*XC*FH*FW) + k*(XC*FH*FW) + l*(FH*FW) + m*(FW)+n;
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
tid += gridDim.x*blockDim.x;
}
}
void Transpose6d(char txt, float* array_transpose, float* array,
const int d1, const int d2, const int d3, const int d4, const int d5, const int d6) {
int XN, OH, OW, FN, XC, FH, FW;
int i, j, k, l;
switch (txt)
{
case 'f':
XN = d1;
XC = d2;
FH = d3;
FW = d4;
OH = d5;
OW = d6;
Transpose6d_forward(array_transpose, array, XN, XC, FH, FW, OH, OW);
break;
case 'b':
XN = d1;
OH = d2;
OW = d3;
XC = d4;
FH = d5;
FW = d6;
Transpose6d_backward(array_transpose, array, XN, OH, OW, XC, FH, FW);
break;
default:
cout << "Error for 'txt' variable in Transpose6d(cpu)!" << endl;
break;
}
}
void Transpose6d_gpu(char txt, float* dev_transposeArray, float* dev_array,
const int d1, const int d2, const int d3, const int d4, const int d5, const int d6) {
int XN, OH, OW, FN, XC, FH, FW;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
switch (txt)
{
case 'f':
XN = d1;
XC = d2;
FH = d3;
FW = d4;
OH = d5;
OW = d6;
if (XN == 1) {
Transpose2d_gpu(dev_transposeArray, dev_array, XC*FH*FW, OH*OW);
}
else {
Kernel_Transpose6d_forward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, XC, FH, FW, OH, OW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
//Kernel_Transpose6d_forward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, XC, FH, FW, OH, OW);
//hipDeviceSynchronize();
//gpuErrchk(hipGetLastError());
break;
case 'b':
XN = d1;
OH = d2;
OW = d3;
XC = d4;
FH = d5;
FW = d6;
if (XN == 1) {
Transpose2d_gpu(dev_transposeArray, dev_array, OH*OW, XC*FH*FW);
}
else {
Kernel_Transpose6d_backward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, OH, OW, XC, FH, FW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
//Kernel_Transpose6d_backward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, OH, OW, XC, FH, FW);
//hipDeviceSynchronize();
//gpuErrchk(hipGetLastError());
break;
default:
cout << "Error for 'txt' variable in Transpose6d(gpu)!" << endl;
break;
}
}
void Argmax(int* argMax, float** array, const int r, const int c) {
int idx;
float temp;
for (int i = 0; i < r; i++) {
idx = 0;
temp = 0.0;
for (int j = 0; j < c; j++) {
if (array[i][j] > temp) {
temp = array[i][j];
idx = j;
}
}
argMax[i] = idx;
}
}
__global__ void Kernel_Argmax(int* dev_argMax, float* dev_array, const int r, const int c) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= r) return;
	int idx = 0;	// initialize so rows whose values are all non-positive still yield a valid index
float temp = 0.0;
for (int j = 0; j < c; j++) {
if (dev_array[i*c + j] > temp) {
temp = dev_array[i*c + j];
idx = j;
}
}
dev_argMax[i] = idx;
}
void Argmax_gpu(int* dev_argMax, float* dev_array, const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE_X);
dim3 dimGrid((r + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Argmax_gpu'!" << endl;
}
Kernel_Argmax << < dimGrid, dimBlock >> > (dev_argMax, dev_array, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void Max(float* array_max, int* arg_max, float* array,
const int r, const int c) {
float temp;
int idx;
for (int i = 0; i < r; i++) {
idx = 0;
temp = 0.0;
for (int j = 0; j < c; j++) {
if (array[i*c + j] > temp) {
temp = array[i*c + j];
idx = j;
}
}
arg_max[i] = idx;
array_max[i] = temp;
}
}
__global__ void Kernel_Max(float* dev_arrayMax, int* dev_argMax, float* dev_array,
const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
int idx, i;
float temp;
while (tid < N)
{
i = tid;
temp = 0.;
idx = 0;
for (int j = 0; j < c; j++) {
if (j == 0) temp = dev_array[i*c + j], idx = 0;
else if (dev_array[i*c + j] > temp) temp = dev_array[i*c + j], idx = j;
}
dev_argMax[i] = idx;
dev_arrayMax[i] = temp;
tid += gridDim.x*blockDim.x;
}
}
void Max_gpu(float* dev_arrayMax, int* dev_argMax, float* dev_array,
const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Max << < dimGrid, dimBlock >> > (dev_arrayMax, dev_argMax, dev_array, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void Avg(float* array_avg, float* array,
const int r, const int c)
{
float sum;
for (int i = 0; i < r; i++) {
sum = 0.0;
for (int j = 0; j < c; j++) {
sum += array[i*c + j];
}
array_avg[i] = sum / c;
}
}
__global__ void Kernel_Avg(float* dev_arrayMax, float* dev_array,
const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
float sum;
int i;
while (tid < N)
{
i = tid;
sum = 0.0;
for (int j = 0; j < c; j++) {
sum += dev_array[i*c + j];
}
dev_arrayMax[i] = sum / c;
tid += gridDim.x*blockDim.x;
}
}
void Avg_gpu(float* dev_arrayMax, float* dev_array,
const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Avg << < dimGrid, dimBlock >> > (dev_arrayMax, dev_array, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
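// Max-pooling backward: the gradient of each pooled output is routed entirely to the
// input position that won the max (arg_max); all other positions in the window get 0.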
void Function1_poolingBackward(float* dmax, int* arg_max, float* array,
const int i_dmax, const int j_dmax) {
int i, j;
int r = i_dmax, c = j_dmax;
for (i = 0; i < r; i++) {
for (j = 0; j < c; j++) {
dmax[i*c + j] = 0;
}
dmax[i*c + arg_max[i]] = array[i];
}
}
__global__ void Kernel_Function1_poolingBackward(float* dev_dmax, int* dev_argMax, float* dev_flattenDout,
const int i_dmax, const int j_dmax) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = i_dmax*j_dmax;
int i, j;
	while (tid < N)
	{
		idx2d(tid, i_dmax, j_dmax, i, j);
		// one write per element: writing 0 everywhere and then the max slot from every
		// thread races, since another thread's 0 can land after the max value
		dev_dmax[i*j_dmax + j] = (j == dev_argMax[i]) ? dev_flattenDout[i] : 0.0f;
		tid += gridDim.x*blockDim.x;
	}
}
void Function1_poolingBackward_gpu(float* dev_dmax, int* dev_argMax, float* dev_flattenDout,
const int i_dmax, const int j_dmax) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function1_poolingBackward << < dimGrid, dimBlock >> > (dev_dmax, dev_argMax, dev_flattenDout, i_dmax, j_dmax);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void Function1_poolingBackward_avg(float* dmax, float* array,
const int i_dmax, const int j_dmax)
{
int i, j;
int r = i_dmax, c = j_dmax;
for (i = 0; i < r; i++) {
for (j = 0; j < c; j++) {
dmax[i*c + j] = array[i] / c;
}
}
}
__global__ void Kernel_Function1_poolingBackward_avg(float* dev_dmax, float* dev_flattenDout,
const int i_dmax, const int j_dmax) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = i_dmax*j_dmax;
int i, j;
while (tid < N)
{
idx2d(tid, i_dmax, j_dmax, i, j);
dev_dmax[i*j_dmax + j] = dev_flattenDout[i] / j_dmax;
tid += gridDim.x*blockDim.x;
}
}
void Function1_poolingBackward_avg_gpu(float* dev_dmax, float* dev_flattenDout,
const int i_dmax, const int j_dmax) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function1_poolingBackward_avg << < dimGrid, dimBlock >> > (dev_dmax, dev_flattenDout, i_dmax, j_dmax);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void Function2_poolingBackward(float** dcol, float** dmax,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
dcol[i*(OH*OW) + j*(OW)+k][l*(FH*FW) + m*(FH)+n] = dmax[i*(OH*OW*XC) + j*(OW*XC) + k*(XC)+l][m*(FW)+n];
}
}
}
}
}
}
}
__global__ void Kernel_Function_reluForward(float* dev_x, int* dev_index, const int size) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
if (dev_x[tid] > 0) dev_index[tid] = 1;
else dev_index[tid] = 0;
dev_x[tid] *= dev_index[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_reluForward_gpu(float* dev_x, int* dev_index, const int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_reluForward << < dimGrid, dimBlock >> > (dev_x, dev_index, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function_reluBackward(float* dev_dout, int* dev_index, const int size) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_dout[tid] *= dev_index[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_reluBackward_gpu(float* dev_dout, int* dev_index, const int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_reluBackward << < dimGrid, dimBlock >> > (dev_dout, dev_index, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_softmaxBackward(float* dev_dx, float* dev_y, int* dev_t,
const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r*c;
while (tid < N)
{
dev_dx[tid] = (dev_y[tid] - dev_t[tid]) / r;
tid += gridDim.x*blockDim.x;
}
}
void Function_softmaxBackward_gpu(float* dev_dx, float* dev_y, int* dev_t,
const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_softmaxBackward << < dimGrid, dimBlock >> > (dev_dx, dev_y, dev_t, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
/*batch*/
__global__ void Kernel_Function_batch1(float* dev_x, float* dev_x_batch,
const int BN, const int XC, const int XH, const int XW,
int randomNumber) {
int i, j, k, l;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = BN*XC*XH*XW;
if (tid >= N) return;
idx4d(tid, BN, XC, XH, XW, i, j, k, l);
int idx_batch = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
int idx = (i + randomNumber)*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
dev_x_batch[idx_batch] = dev_x[idx];
}
__global__ void Kernel_Function_batch2(int* dev_t, int* dev_t_batch,
const int BN, const int ON, int randomNumber) {
int i, j;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = BN*ON;
if (tid >= N) return;
idx2d(tid, BN, ON, i, j);
int idx_batch = i*ON + j;
int idx = (i + randomNumber)*ON + j;
dev_t_batch[idx_batch] = dev_t[idx];
}
void Function_batch_gpu(float* dev_x, int* dev_t, float* dev_x_batch, int* dev_t_batch,
const int BN, const int XC, const int XH, const int XW,
const int ON, int randomNumber) {
int size = BN*XC*XH*XW;
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid1.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_batch_gpu'!" << endl;
}
Kernel_Function_batch1 << < dimGrid1, dimBlock1 >> > (dev_x, dev_x_batch, BN, XC, XH, XW, randomNumber);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
size = BN*ON;
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_batch_gpu'!" << endl;
}
Kernel_Function_batch2 << < dimGrid2, dimBlock2 >> > (dev_t, dev_t_batch, BN, ON, randomNumber);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
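//Usage sketch (illustrative, not called in this file): draw one mini-batch of BN
//consecutive samples starting at a host-chosen offset. The kernels above copy a
//contiguous slice, so 'randomNumber' must satisfy randomNumber + BN <= total N;
//any shuffling has to happen on the host side.
//
//	int offset = rand() % (train_N - BN + 1);
//	Function_batch_gpu(dev_x, dev_t, dev_x_batch, dev_t_batch,
//		BN, XC, XH, XW, ON, offset);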
/*dropout*/
__global__ void Kernel_Function_dropoutinit(unsigned int seed, hiprandState_t* states, const int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the cpu */
			tid, /* the sequence number should be different for each core (unless you want all
				 cores to get the same sequence of numbers for some reason) - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[tid]);
tid += gridDim.x*blockDim.x;
}
}
void Function_dropoutinit_gpu(unsigned int seed, hiprandState_t* states, const int size) {
	dim3 dimBlock(BLOCK_SIZE); //use the common block size; the grid-stride loop covers any remainder
dim3 dimGrid(GRID_SIZE);
Kernel_Function_dropoutinit << < dimGrid, dimBlock >> > (seed, states, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function_dropoutForward(float* dev_x, int* dev_index, const int size,
float dropoutRatio, int train_flg,
hiprandState_t* states) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
float randomNumber;
while (tid < N)
{
if (train_flg == 1) {
randomNumber = hiprand_uniform(&states[tid]);
if (randomNumber > dropoutRatio) dev_index[tid] = 1;
else dev_index[tid] = 0;
dev_x[tid] *= dev_index[tid];
}
else {
dev_x[tid] *= (1.0/* - dropoutRatio*/);
}
tid += gridDim.x*blockDim.x;
}
}
void Function_dropoutForward_gpu(float* dev_x, int* dev_index, const int size,
float dropoutRatio, int train_flg,
hiprandState_t* states) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_dropoutForward << < dimGrid, dimBlock >> > (dev_x, dev_index, size, dropoutRatio, train_flg, states);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function_dropoutBackward(float* dev_dout, int* dev_index, const int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_dout[tid] *= dev_index[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_dropoutBackward_gpu(float* dev_dout, int* dev_index, const int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_dropoutBackward << < dimGrid, dimBlock >> > (dev_dout, dev_index, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
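//Call-order sketch (names illustrative): the RNG states are seeded once and
//reused every iteration; the index mask produced in the forward pass gates the
//backward pass. Note the inference branch above multiplies by 1.0 because the
//"* (1 - dropoutRatio)" factor is commented out, i.e. activations pass through unscaled.
//
//	Function_dropoutinit_gpu((unsigned int)time(NULL), dev_states, size);      //once, at setup
//	Function_dropoutForward_gpu(dev_x, dev_index, size, 0.5f, 1, dev_states);  //training forward
//	/* ... backprop reaches this layer ... */
//	Function_dropoutBackward_gpu(dev_dout, dev_index, size);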
/*skip connection*/
__global__ void Kernel_Function_sc(float* dev_x, float* dev_x_skip, int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_x[tid] += dev_x_skip[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_sc_gpu(float* dev_x, float* dev_x_skip, int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_sc << < dimGrid, dimBlock >> > (dev_x, dev_x_skip, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
/*BN*/
__global__ void Kernel_Function_bninit(float* dev_gamma, const int DN) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= DN) return;
dev_gamma[tid] = 1;
}
void Function_bninit_gpu(float* dev_gamma, const int DN) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_bninit'!" << endl;
}
Kernel_Function_bninit << < dimGrid, dimBlock >> > (dev_gamma, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function1_bnForward(float* dev_mu, float* dev_x,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_x[tid];
	}
	__syncthreads(); //all threads must reach the barrier together, so it stays outside the divergent branch
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_mu[blockIdx.x] = cache[0] / XN;
__syncthreads();
}
void Function1_bnForward_gpu(float* dev_mu, float* dev_x,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function1_bnForward_gpu'" << endl;
}
Kernel_Function1_bnForward << < DN, BLOCK_SIZE >> > (dev_mu, dev_x, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function2_bnForward(float* dev_xc, float* dev_x, float* dev_mu,
const int XN, const int DN) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
int idx = i*DN + j;
dev_xc[idx] = dev_x[idx] - dev_mu[j];
}
void Function2_bnForward_gpu(float* dev_xc, float* dev_x, float* dev_mu,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function2_bnForward_gpu'!" << endl;
}
Kernel_Function2_bnForward << < dimGrid, dimBlock >> > (dev_xc, dev_x, dev_mu, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function3_bnForward(float* dev_std, float* dev_xc,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_xc[tid] * dev_xc[tid];
	}
	__syncthreads(); //barrier outside the divergent branch
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_std[blockIdx.x] = sqrtf(cache[0] / XN + 1e-7);
__syncthreads();
}
void Function3_bnForward_gpu(float* dev_std, float* dev_xc,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function3_bnForward_gpu'" << endl;
}
Kernel_Function3_bnForward << < DN, BLOCK_SIZE >> > (dev_std, dev_xc, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function4_bnForward(float* dev_xn, float* dev_xc, float* dev_std,
const int XN, const int DN) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
int idx = i*DN + j;
dev_xn[idx] = dev_xc[idx] / dev_std[j];
}
void Function4_bnForward_gpu(float* dev_xn, float* dev_xc, float* dev_std,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function4_bnForward_gpu'!" << endl;
}
Kernel_Function4_bnForward << < dimGrid, dimBlock >> > (dev_xn, dev_xc, dev_std, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function5_bnForward(float* dev_running_mean, float* dev_running_var, float* dev_mu, float* dev_std,
float momentum, const int DN) {
	unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
	if (tid >= DN) return;
	dev_running_mean[tid] = momentum * dev_running_mean[tid] + (1 - momentum) * dev_mu[tid];
	dev_running_var[tid] = momentum * dev_running_var[tid] + (1 - momentum) * dev_std[tid] * dev_std[tid];
}
void Function5_bnForward_gpu(float* dev_running_mean, float* dev_running_var, float* dev_mu, float* dev_std,
float momentum, const int DN) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function5_bnForward_gpu'!" << endl;
}
	//superseded by Function_bnForward_gpu below; kept for reference
	//Kernel_Function5_bnForward << < dimGrid, dimBlock >> > (dev_running_mean, dev_running_var, dev_mu, dev_std, momentum, DN);
	//hipDeviceSynchronize();
	//gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function6_bnForward(float* dev_x, float* dev_running_mean, float* dev_running_var,
const int XN, const int DN) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
dev_x[idx] = (dev_x[idx] - dev_running_mean[j]) / sqrtf(dev_running_var[j] + 1e-7);
}
void Function6_bnForward_gpu(float* dev_x, float* dev_running_mean, float* dev_running_var,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function6_bnForward_gpu'!" << endl;
}
Kernel_Function6_bnForward << < dimGrid, dimBlock >> > (dev_x, dev_running_mean, dev_running_var, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function7_bnForward(float* dev_x, float* dev_out, float* dev_gamma, float* dev_beta,
const int XN, const int DN) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
dev_x[idx] = dev_gamma[j] * dev_out[idx] + dev_beta[j];
}
void Function7_bnForward_gpu(float* dev_x, float* dev_out, float* dev_gamma, float* dev_beta,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function7_bnForward_gpu'!" << endl;
}
Kernel_Function7_bnForward << < dimGrid, dimBlock >> > (dev_x, dev_out, dev_gamma, dev_beta, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Function_bnForward(float* dev_running_mean, float* dev_running_var, float* dev_mu, float* dev_std,
float momentum, const int DN) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i >= DN) return;
dev_running_mean[i] = momentum * dev_running_mean[i] + (1 - momentum) * dev_mu[i];
dev_running_var[i] = momentum * dev_running_var[i] + (1 - momentum) * dev_std[i] * dev_std[i];
}
void Function_bnForward_gpu(float* dev_running_mean, float* dev_running_var, float* dev_mu, float* dev_std,
float momentum, const int DN) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_bnForward_test_gpu'!" << endl;
}
	Function_bnForward << < dimGrid, dimBlock >> > (dev_running_mean, dev_running_var, dev_mu, dev_std, momentum, DN);
	hipDeviceSynchronize();
	gpuErrchk(hipGetLastError());
}
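//A minimal composition sketch of the numbered batch-norm forward helpers above
//(training path). The buffer names and caller-side allocation are illustrative
//assumptions; the batch-norm layer that actually drives these calls may stage
//them differently. Function7 writes the scaled result back into dev_x.
void sketch_bnForward_train(float* dev_x,
	float* dev_mu, float* dev_xc, float* dev_xn, float* dev_std,
	float* dev_gamma, float* dev_beta,
	float* dev_running_mean, float* dev_running_var,
	float momentum, const int XN, const int DN) {
	Function1_bnForward_gpu(dev_mu, dev_x, XN, DN);                      //mu = mean(x) per feature
	Function2_bnForward_gpu(dev_xc, dev_x, dev_mu, XN, DN);              //xc = x - mu
	Function3_bnForward_gpu(dev_std, dev_xc, XN, DN);                    //std = sqrt(var(xc) + 1e-7)
	Function4_bnForward_gpu(dev_xn, dev_xc, dev_std, XN, DN);            //xn = xc / std
	Function_bnForward_gpu(dev_running_mean, dev_running_var,
		dev_mu, dev_std, momentum, DN);                                  //update running statistics
	Function7_bnForward_gpu(dev_x, dev_xn, dev_gamma, dev_beta, XN, DN); //x = gamma * xn + beta
}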
__global__ void Kernel_Function1_bnBackward(float* dev_dbeta, float* dev_dout,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_dout[tid];
	}
	__syncthreads(); //barrier outside the divergent branch
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_dbeta[blockIdx.x] = cache[0];
__syncthreads();
}
void Function1_bnBackward_gpu(float* dev_dbeta, float* dev_dout,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function1_bnBackward_gpu'" << endl;
}
Kernel_Function1_bnBackward << < DN, BLOCK_SIZE >> > (dev_dbeta, dev_dout, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function2_bnBackward(float* dev_dgamma, float* dev_xn, float* dev_dout,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_xn[tid] * dev_dout[tid];
	}
	__syncthreads(); //barrier outside the divergent branch
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_dgamma[blockIdx.x] = cache[0];
__syncthreads();
}
void Function2_bnBackward_gpu(float* dev_dgamma, float* dev_xn, float* dev_dout,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function2_bnBackward_gpu'" << endl;
}
Kernel_Function2_bnBackward << < DN, BLOCK_SIZE >> > (dev_dgamma, dev_xn, dev_dout, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function3_bnBackward(float* dev_dxn, float* dev_gamma, float* dev_dout, float* dev_dxc, float* dev_std,
const int XN, const int DN) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
dev_dxn[idx] = dev_gamma[j] * dev_dout[idx];
dev_dxc[idx] = dev_dxn[idx] / dev_std[j];
}
void Function3_bnBackward_gpu(float* dev_dxn, float* dev_gamma, float* dev_dout, float* dev_dxc, float* dev_std,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function3_bnBackward_gpu'!" << endl;
}
Kernel_Function3_bnBackward << < dimGrid, dimBlock >> > (dev_dxn, dev_gamma, dev_dout, dev_dxc, dev_std, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function4_bnBackward(float* dev_dstd, float* dev_dxn, float* dev_xc, float* dev_std,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_dxn[tid] * dev_xc[tid] / (dev_std[blockIdx.x] * dev_std[blockIdx.x]);
	}
	__syncthreads(); //barrier outside the divergent branch
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_dstd[blockIdx.x] = -cache[0];
__syncthreads();
}
void Function4_bnBackward_gpu(float* dev_dstd, float* dev_dxn, float* dev_xc, float* dev_std,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function4_bnBackward_gpu'" << endl;
}
Kernel_Function4_bnBackward << < DN, BLOCK_SIZE >> > (dev_dstd, dev_dxn, dev_xc, dev_std, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function5_bnBackward(float* dev_dxc, float* dev_xc, float* dev_dstd, float* dev_std,
const int XN, const int DN, int batch_size) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
float dvar = 0.5 * dev_dstd[j] / dev_std[j];
dev_dxc[idx] = dev_dxc[idx] + (2.0 / batch_size) * dev_xc[idx] * dvar;
}
void Function5_bnBackward_gpu(float* dev_dxc, float* dev_xc, float* dev_dstd, float* dev_std,
const int XN, const int DN, int batch_size) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function5_bnBackward_gpu'!" << endl;
}
Kernel_Function5_bnBackward << < dimGrid, dimBlock >> > (dev_dxc, dev_xc, dev_dstd, dev_std, XN, DN, batch_size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function6_bnBackward(float* dev_dmu, float* dev_dxc,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_dxc[tid];
	}
	__syncthreads(); //barrier outside the divergent branch
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_dmu[blockIdx.x] = cache[0];
__syncthreads();
}
void Function6_bnBackward_gpu(float* dev_dmu, float* dev_dxc,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function6_bnBackward_gpu'" << endl;
}
Kernel_Function6_bnBackward << < DN, BLOCK_SIZE >> > (dev_dmu, dev_dxc, XN, DN);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function7_bnBackward(float* dev_dout, float* dev_dxc, float* dev_dmu,
const int XN, const int DN, int batch_size) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
dev_dout[idx] = dev_dxc[idx] - (dev_dmu[j] / batch_size);
}
void Function7_bnBackward_gpu(float* dev_dout, float* dev_dxc, float* dev_dmu,
const int XN, const int DN, int batch_size) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function7_bnBackward_gpu'!" << endl;
}
Kernel_Function7_bnBackward << < dimGrid, dimBlock >> > (dev_dout, dev_dxc, dev_dmu, XN, DN, batch_size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
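//The matching backward composition (a sketch under the same naming assumptions
//as the forward sketch above; dx is written back into dev_dout in the last step).
void sketch_bnBackward(float* dev_dout,
	float* dev_dbeta, float* dev_dgamma, float* dev_gamma,
	float* dev_xn, float* dev_xc, float* dev_std,
	float* dev_dxn, float* dev_dxc, float* dev_dstd, float* dev_dmu,
	const int XN, const int DN) {
	int batch_size = XN;
	Function1_bnBackward_gpu(dev_dbeta, dev_dout, XN, DN);                            //dbeta = sum(dout)
	Function2_bnBackward_gpu(dev_dgamma, dev_xn, dev_dout, XN, DN);                   //dgamma = sum(xn * dout)
	Function3_bnBackward_gpu(dev_dxn, dev_gamma, dev_dout, dev_dxc, dev_std, XN, DN); //dxn = gamma*dout, dxc = dxn/std
	Function4_bnBackward_gpu(dev_dstd, dev_dxn, dev_xc, dev_std, XN, DN);             //dstd = -sum(dxn*xc/std^2)
	Function5_bnBackward_gpu(dev_dxc, dev_xc, dev_dstd, dev_std, XN, DN, batch_size); //dxc += (2/N)*xc*dvar
	Function6_bnBackward_gpu(dev_dmu, dev_dxc, XN, DN);                               //dmu = sum(dxc)
	Function7_bnBackward_gpu(dev_dout, dev_dxc, dev_dmu, XN, DN, batch_size);         //dx = dxc - dmu/N
}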
/*LRN*/
__global__ void Kernel_Function_lrnForward1(float* dev_x, float* dev_X, float* dev_y4,
float myBias, float myAlpha, int myDepth_radius,
const int XN, const int XC, const int XH, const int XW) {
int i, j, k, l, n;
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
if (tid >= N) return;
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
int idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
dev_X[idx] = dev_x[idx];
float sum = 0;
int idx_n;
for (n = j - myDepth_radius; n <= j + myDepth_radius; n++) {
if (n < 0 || n >= XC) continue;
idx_n = i*(XC*XH*XW) + n*(XH*XW) + k*(XW)+l;
sum += powf(dev_x[idx_n], 2);
}
dev_y4[idx] = (myBias + myAlpha * sum);
}
__global__ void Kernel_Function_lrnForward2(float* dev_x, float* dev_y4,
float myBeta,
const int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
if (tid >= N) return;
dev_x[tid] /= powf(dev_y4[tid], myBeta);
}
void Function_lrnForward_gpu(float* dev_x, float* dev_X, float* dev_y4,
float myBias, float myAlpha, float myBeta, int myDepth_radius,
const int XN, const int XC, const int XH, const int XW) {
int size = XN*XC*XH*XW;
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid1.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_lrnForward_gpu'!" << endl;
}
Kernel_Function_lrnForward1 << < dimGrid1, dimBlock1 >> > (dev_x, dev_X, dev_y4, myBias, myAlpha, myDepth_radius, XN, XC, XH, XW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_lrnForward_gpu'!" << endl;
}
Kernel_Function_lrnForward2 << < dimGrid2, dimBlock2 >> > (dev_x, dev_y4, myBeta, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
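//For reference, the two kernels above implement cross-channel LRN: with
//s(i,j,k,l) = sum over n in [j - depth_radius, j + depth_radius] of x(i,n,k,l)^2,
//the forward pass computes y = x / (bias + alpha * s)^beta and caches
//(bias + alpha * s) in dev_y4 for reuse in the backward pass.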
__global__ void Kernel_Function_lrnBackward1(float* dev_dout, float* dev_dout_new, float* dev_X, float* dev_y4,
float myAlpha, float myBeta, int myDepth_radius,
const int XN, const int XC, const int XH, const int XW) {
int i, j, k, l, n;
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
if (tid >= N) return;
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
int idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
float sum = 0;
int idx_n;
for (n = j - myDepth_radius; n <= j + myDepth_radius; n++) {
if (n < 0 || n >= XC) continue;
idx_n = i*(XC*XH*XW) + n*(XH*XW) + k*(XW)+l;
sum += (dev_X[idx_n] * dev_dout[idx_n]) / powf(dev_y4[idx_n], myBeta + 1);
}
dev_dout_new[idx] = dev_dout[idx] / powf(dev_y4[idx], myBeta) - 2.0*myAlpha*myBeta * dev_X[idx] * sum;
}
__global__ void Kernel_Function_lrnBackward2(float* dev_dout, float* dev_dout_new,
const int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
if (tid >= N) return;
dev_dout[tid] = dev_dout_new[tid];
}
void Function_lrnBackward_gpu(float* dev_dout, float* dev_dout_new, float* dev_X, float* dev_y4,
float myAlpha, float myBeta, int myDepth_radius,
const int XN, const int XC, const int XH, const int XW) {
int size = XN*XC*XH*XW;
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid1.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_lrnBackward_gpu'!" << endl;
}
Kernel_Function_lrnBackward1 << < dimGrid1, dimBlock1 >> > (dev_dout, dev_dout_new, dev_X, dev_y4, myAlpha, myBeta, myDepth_radius, XN, XC, XH, XW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_lrnBackward_gpu'!" << endl;
}
Kernel_Function_lrnBackward2 << < dimGrid2, dimBlock2 >> > (dev_dout, dev_dout_new, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
/*accuracy*/
__global__ void Kernel_Function_acc(float* dev_predict, int* dev_label, int* dev_acc_binary,
int N, int C_label, int C_output, int H, int W) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int N_ = N*C_label*H*W;
int i, j, k, l, j_, idx_label, idx_predict, idx_max;
float tmp;
while (tid < N_)
{
		idx4d(tid, N, C_label, H, W, i, j, k, l); //decode with the label layout so i indexes the batch correctly for any N
for (j_ = 0; j_ < C_output; j_++)
{
idx_predict = i*(C_output*H*W) + j_*(H*W) + k*(W)+l;
if (j_ == 0) tmp = dev_predict[idx_predict], idx_max = 0;
else if (dev_predict[idx_predict] > tmp)
{
tmp = dev_predict[idx_predict];
idx_max = j_;
}
}
idx4d(tid, N, C_label, H, W, i, j, k, l);
idx_label = i*(C_label*H*W) + j*(H*W) + k*(W)+l;
if (dev_label[idx_label] == idx_max) dev_acc_binary[idx_label] = 1;
else dev_acc_binary[idx_label] = 0;
tid += gridDim.x*blockDim.x;
}
}
void Function_acc_gpu(float* dev_predict, int* dev_label, int* dev_acc_binary,
int* image_shape, int the_number_of_class) {
int N = image_shape[0];
int C_label = 1, C_output = the_number_of_class;
int H = image_shape[2];
int W = image_shape[3];
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_acc << < dimGrid, dimBlock >> > (dev_predict, dev_label, dev_acc_binary, N, C_label, C_output, H, W);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function_acc_dice(float* dev_predict, int* dev_label, int* dev_predict_binary, int label,
int N, int C_label, int C_output, int H, int W) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int N_ = N*C_label*H*W;
int i, j, k, l, j_, idx_label, idx_predict, idx_max;
float tmp;
while (tid < N_)
{
		idx4d(tid, N, C_label, H, W, i, j, k, l); //decode with the label layout so i indexes the batch correctly for any N
for (j_ = 0; j_ < C_output; j_++)
{
idx_predict = i*(C_output*H*W) + j_*(H*W) + k*(W)+l;
if (j_ == 0) tmp = dev_predict[idx_predict], idx_max = 0;
else if (dev_predict[idx_predict] > tmp)
{
tmp = dev_predict[idx_predict];
idx_max = j_;
}
}
idx4d(tid, N, C_label, H, W, i, j, k, l);
idx_label = i*(C_label*H*W) + j*(H*W) + k*(W)+l;
if (idx_max == label) dev_predict_binary[idx_label] = 1;
else dev_predict_binary[idx_label] = 0;
		//note: the label buffer is binarized in place (any non-background, non-void value becomes 1)
		if (dev_label[idx_label] != 0 && dev_label[idx_label] != 255) dev_label[idx_label] = 1;
		else dev_label[idx_label] = 0;
tid += gridDim.x*blockDim.x;
}
}
void Function_acc_dice_gpu(float* dev_predict, int* dev_label, int* dev_predict_binary, int label,
int* image_shape, int the_number_of_class) {
int N = image_shape[0];
int C_label = 1, C_output = the_number_of_class;
int H = image_shape[2];
int W = image_shape[3];
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_acc_dice << < dimGrid, dimBlock >> > (dev_predict, dev_label, dev_predict_binary, label, N, C_label, C_output, H, W);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function_acc_iou(float* dev_predict, int* dev_predict_index,
int N, int C_label, int C_output, int H, int W) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int N_ = N*C_label*H*W;
int i, j, k, l, j_, idx_label, idx_predict, idx_max;
float tmp;
while (tid < N_)
{
		idx4d(tid, N, C_label, H, W, i, j, k, l); //decode with the label layout so i indexes the batch correctly for any N
for (j_ = 0; j_ < C_output; j_++)
{
idx_predict = i*(C_output*H*W) + j_*(H*W) + k*(W)+l;
if (j_ == 0) tmp = dev_predict[idx_predict], idx_max = 0;
else if (dev_predict[idx_predict] > tmp)
{
tmp = dev_predict[idx_predict];
idx_max = j_;
}
}
idx4d(tid, N, C_label, H, W, i, j, k, l);
idx_label = i*(C_label*H*W) + j*(H*W) + k*(W)+l;
dev_predict_index[idx_label] = idx_max;
tid += gridDim.x*blockDim.x;
}
}
void Function_acc_iou_gpu(float* dev_predict, int* dev_predict_index,
int* image_shape, int the_number_of_class) {
int N = image_shape[0];
int C_label = 1, C_output = the_number_of_class;
int H = image_shape[2];
int W = image_shape[3];
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_acc_iou << < dimGrid, dimBlock >> > (dev_predict, dev_predict_index, N, C_label, C_output, H, W);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
int** Function_confusion_matrix(/*int** confusion_matrix, */int* predict, int* gt, int size, int the_number_of_class)
{
int** confusion_matrix = new int*[the_number_of_class];
for (int i = 0; i < the_number_of_class; i++) confusion_matrix[i] = new int[the_number_of_class];
for (int i = 0; i < the_number_of_class; i++) memset(confusion_matrix[i], 0, the_number_of_class * sizeof(int));
	//row(i):ground-truth image, column(j):predicted image
	//single pass over the pixels (equivalent to the original per-class-pair loops, but O(size) instead of O(size*K^2))
	for (int pixel = 0; pixel < size; pixel++)
	{
		if (gt[pixel] == 0 || gt[pixel] == 255) continue; //skip background/void pixels
		int i = gt[pixel] - 1;
		int j = predict[pixel] - 1;
		if (i < 0 || i >= the_number_of_class) continue;
		if (j < 0 || j >= the_number_of_class) continue;
		confusion_matrix[i][j] += 1;
	}
return confusion_matrix;
}
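//A follow-up sketch (illustrative, not called in this file): per-class IoU
//derived from the confusion matrix above as TP / (TP + FP + FN).
float class_iou_from_confusion(int** confusion_matrix, int the_number_of_class, int c)
{
	int tp = confusion_matrix[c][c];
	int fp = 0, fn = 0;
	for (int k = 0; k < the_number_of_class; k++)
	{
		if (k == c) continue;
		fp += confusion_matrix[k][c]; //ground truth k, predicted c
		fn += confusion_matrix[c][k]; //ground truth c, predicted k
	}
	int denom = tp + fp + fn;
	return denom > 0 ? (float)tp / denom : 0.0f;
}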
void accuracy_top5(float* x, const int size)
{
set<int> index_top5;
	float temp = 0;
	int index = 0; //initialized in case every remaining score is <= 0
for (int n = 0; n < 5; n++)
{
temp = 0;
for (int i = 0; i < size; i++)
{
if (x[i] > temp && index_top5.find(i) == index_top5.end())
{
temp = x[i];
index = i;
}
}
index_top5.insert(index);
}
set<int>::iterator iter;
for (iter = index_top5.begin(); iter != index_top5.end(); iter++)
{
cout << "index of top5 : " << *iter << ", score : " << x[*iter] * 100 << "(%)" << endl;
}
}
/*concat*/
__global__ void Kernel_Function_concatForward(float* dev_out, float* dev_x1, float* dev_x2,
int N, int C1, int C2, int H, int W)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int C = C1 + C2;
int N_max = N*C*H*W;
int i, j, k, l, idx, idx_x1, idx_x2;
while (tid < N_max)
{
idx4d(tid, N, C, H, W, i, j, k, l);
idx = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_x1 = i*(C1*H*W) + j*(H*W) + k*(W)+l;
idx_x2 = i*(C2*H*W) + (j - C1)*(H*W) + k*(W)+l;
if (j < C1)
{
dev_out[idx] = dev_x1[idx_x1];
}
else
{
dev_out[idx] = dev_x2[idx_x2];
}
tid += gridDim.x*blockDim.x;
}
}
void Function_concatForward_gpu(float* dev_out, float* dev_x1, float* dev_x2,
int N, int C1, int C2, int H, int W)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_concatForward << < dimGrid, dimBlock >> > (dev_out, dev_x1, dev_x2, N, C1, C2, H, W);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void Kernel_Function_concatBackward(float* dev_dout1, float* dev_dout2, float* dev_dout,
int N, int C1, int C2, int H, int W)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int C = C1 + C2;
int N_max = N*C*H*W;
int i, j, k, l, idx, idx_dout1, idx_dout2;
while (tid < N_max)
{
idx4d(tid, N, C, H, W, i, j, k, l);
idx = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_dout1 = i*(C1*H*W) + j*(H*W) + k*(W)+l;
idx_dout2 = i*(C2*H*W) + (j - C1)*(H*W) + k*(W)+l;
if (j < C1)
{
dev_dout1[idx_dout1] = dev_dout[idx];
}
else
{
dev_dout2[idx_dout2] = dev_dout[idx];
}
tid += gridDim.x*blockDim.x;
}
}
void Function_concatBackward_gpu(float* dev_dout1, float* dev_dout2, float* dev_dout,
int N, int C1, int C2, int H, int W)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_concatBackward << < dimGrid, dimBlock >> > (dev_dout1, dev_dout2, dev_dout, N, C1, C2, H, W);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
/*optimizer*/
__global__ void Kernel_Function_update_sgd(float lr, float* dev_parameter, float* dev_gradient, int size) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_parameter[tid] -= lr * dev_gradient[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_update_sgd_gpu(float lr, float* dev_parameter, float* dev_gradient, int size)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_update_sgd << < dimGrid, dimBlock >> > (lr, dev_parameter, dev_gradient, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void Function_update_sgd_cpu(float lr, float* parameter, float* gradient, int size)
{
for (int i = 0; i < size; i++)
parameter[i] -= lr * gradient[i];
}
__global__ void Kernel_Function_update_rmsprop(float lr, float dr, float* dev_parameter, float* dev_gradient, float* dev_h, int size) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_h[tid] *= dr;
dev_h[tid] += (1 - dr) *dev_gradient[tid] * dev_gradient[tid];
dev_parameter[tid] -= lr * dev_gradient[tid] / (sqrt(dev_h[tid]) + 1e-7);
tid += gridDim.x*blockDim.x;
}
}
void Function_update_rmsprop_gpu(float lr, float dr, float* dev_parameter, float* dev_gradient, float* dev_h, int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_update_rmsprop << < dimGrid, dimBlock >> > (lr, dr, dev_parameter, dev_gradient, dev_h, size);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
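//Usage sketch (tensor names illustrative): one optimizer step walks every
//learnable buffer and applies one of the updates above, e.g.
//
//	Function_update_sgd_gpu(lr, dev_W, dev_dW, W_size);
//	Function_update_sgd_gpu(lr, dev_b, dev_db, b_size);
//
//or, keeping one persistent accumulator dev_h per tensor for RMSprop
//(h = dr*h + (1-dr)*g^2; p -= lr*g / (sqrt(h) + 1e-7)):
//
//	Function_update_rmsprop_gpu(lr, 0.99f, dev_W, dev_dW, dev_h_W, W_size);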
//////////////////////////////////////////////////////// src ver2 ////////////////////////////////////////////////////////
//new and delete
template <typename _type>
void new_cpu(_type* &src, int buffer) {
src = new _type[buffer];
memset(src, 0, buffer * sizeof(_type));
}
template <typename _type>
void delete_cpu(_type* &src) {
delete[] src;
src = NULL;
}
template <typename _type>
void new_gpu(_type* &src, int buffer) {
gpuErrchk(hipMalloc((void**)&src, buffer * sizeof(_type)));
gpuErrchk(hipMemset(src, 0, buffer * sizeof(_type)));
}
template <typename _type>
void delete_gpu(_type* &src) {
gpuErrchk(hipFree(src));
src = NULL;
}
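//Usage sketch for the helpers above (buffer name illustrative):
//
//	float* dev_buf = NULL;
//	new_gpu<float>(dev_buf, n);    //hipMalloc + zero-fill
//	/* ... launch kernels on dev_buf ... */
//	delete_gpu<float>(dev_buf);    //hipFree, pointer reset to NULL
//
//Both helpers take the pointer by reference, so the caller's variable is
//updated in place; a by-value copy would leak the allocation.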
float* padding(float* x, int pad, int N, int C, int H, int W) {
int idx, idx_pad;
int H_pad = H + 2 * pad;
int W_pad = W + 2 * pad;
int buffer = N*C*H_pad*W_pad;
float* x_pad = NULL;
new_cpu<float>(x_pad, buffer);
for (int i = 0; i < N; i++) {
for (int j = 0; j < C; j++) {
for (int k = 0; k < H; k++) {
for (int l = 0; l < W; l++) {
idx = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_pad = i*(C*H_pad*W_pad) + j*(H_pad*W_pad) + (k + pad)*(W_pad)+(l + pad);
x_pad[idx_pad] = x[idx];
}
}
}
}
delete_cpu<float>(x);
return x_pad;
}
__global__ void kernel_padding_forward(float* x_pad, float* x, int pad,
int N, int C, int H, int W,
int H_pad, int W_pad) {
int i, j, k, l;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int _N = N*C*H*W;
int idx_pad, idx;
while (tid < _N)
{
idx4d(tid, N, C, H, W, i, j, k, l);
idx_pad = i*(C*H_pad*W_pad) + j*(H_pad*W_pad) + (k + pad)*(W_pad)+(l + pad);
idx = i*(C*H*W) + j*(H*W) + k*(W)+l;
x_pad[idx_pad] = x[idx];
tid += gridDim.x*blockDim.x;
}
}
float* padding_gpu(float* x, int pad, int N, int C, int H, int W) {
int H_pad = H + 2 * pad;
int W_pad = W + 2 * pad;
int buffer = N*C*H_pad*W_pad;
float* x_pad = NULL;
new_gpu<float>(x_pad, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_padding_forward << < dimGrid, dimBlock >> > (x_pad, x, pad, N, C, H, W, H_pad, W_pad);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(x);
return x_pad;
}
float* padding(float* dx, int pad, int N, int C, int H, int W, int stride) //backward: crop the padded/strided gradient back to (N, C, H, W)
{
int dH = H + 2 * pad + stride - 1;
int dW = W + 2 * pad + stride - 1;
int buffer = N*C*H*W;
float* dx_pad = NULL;
new_cpu<float>(dx_pad, buffer);
int idx_dx, idx_dx_pad;
for (int i = 0; i < N; i++) {
for (int j = 0; j < C; j++) {
for (int k = 0; k < H; k++) {
for (int l = 0; l < W; l++) {
idx_dx_pad = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_dx = i*(C*dH*dW) + j*(dH*dW) + (k + pad)*(dW)+(l + pad);
dx_pad[idx_dx_pad] = dx[idx_dx];
}
}
}
}
delete_cpu<float>(dx);
return dx_pad;
}
__global__ void kernel_padding_backward(float* dx_pad, float* dx, int pad,
int N, int C, int H, int W,
int dH, int dW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int _N = N*C*H*W;
int i = 0, j = 0, k = 0, l = 0, idx_dx_pad, idx_dx;
while (tid < _N)
{
idx4d(tid, N, C, H, W, i, j, k, l);
idx_dx_pad = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_dx = i*(C*dH*dW) + j*(dH*dW) + (k + pad)*(dW)+(l + pad);
dx_pad[idx_dx_pad] = dx[idx_dx];
tid += gridDim.x*blockDim.x;
}
}
float* padding_gpu(float* dx, int pad, int N, int C, int H, int W, int stride) { //backward: device-side crop back to (N, C, H, W)
int dH = H + 2 * pad + stride - 1;
int dW = W + 2 * pad + stride - 1;
int buffer = N*C*H*W;
float* dx_pad = NULL;
new_gpu<float>(dx_pad, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_padding_backward << < dimGrid, dimBlock >> > (dx_pad, dx, pad, N, C, H, W, dH, dW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(dx);
return dx_pad;
}
float* stride_forward(float* img, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int buffer = XN*XC*FH*FW*OH*OW;
float* col = NULL;
new_cpu<float>(col, buffer);
int y_max, x_max;
int idx_col, idx_img;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < FH; k++) {
y_max = k + stride*OH;
for (int l = 0; l < FW; l++) {
x_max = l + stride*OW;
				for (int a = k, m = 0; a < y_max; a = a + stride, m++) {
					for (int b = l, n = 0; b < x_max; b = b + stride, n++) {
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
col[idx_col] = img[idx_img];
}
}
}
}
}
}
delete_cpu<float>(img);
return col;
}
__global__ void kernel_stride_forward(float* col, float* img, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*FH*FW*OH*OW;
int i, j, k, l, m, n, a, b;
int idx_col;
int idx_img;
while (tid < N)
{
idx6d(tid, XN, XC, FH, FW, OH, OW, i, j, k, l, m, n);
a = k + m*stride;
b = l + n*stride;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
col[idx_col] = img[idx_img];
tid += gridDim.x * blockDim.x;
}
}
float* stride_forward_gpu(float* img, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int buffer = XN*XC*FH*FW*OH*OW;
float* col = NULL;
new_gpu<float>(col, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_stride_forward << < dimGrid, dimBlock >> > (col, img, stride, XN, XC, FH, FW, OH, OW, XH, XW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(img);
return col;
}
float* stride_backward(float* col, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int buffer = XN*XC*XH*XW;
float* img = NULL;
new_cpu<float>(img, buffer);
int y_max, x_max;
int idx_img, idx_col;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < FH; k++) {
y_max = k + stride*OH;
for (int l = 0; l < FW; l++) {
x_max = l + stride*OW;
				for (int a = k, m = 0; a < y_max; a = a + stride, m++) {
					for (int b = l, n = 0; b < x_max; b = b + stride, n++) {
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
img[idx_img] += col[idx_col];
}
}
}
}
}
}
delete_cpu<float>(col);
return img;
}
__global__ void kernel_stride_backward(float* img, float* col, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int i, j, a, b, idx_img, idx_col;
int k, l, m, n, temp;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, a, b);
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
for (k = 0; k < FH && k <= a; k++)
{
m = (a - k) / stride;
temp = k + stride*m;
if (temp != a || m >= OH)
continue;
for (l = 0; l < FW && l <= b; l++)
{
n = (b - l) / stride;
temp = l + stride*n;
if (temp != b || n >= OW)
continue;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
img[idx_img] += col[idx_col];
}
}
tid += gridDim.x*blockDim.x;
}
}
float* stride_backward_gpu(float* col, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int buffer = XN*XC*XH*XW;
float* img = NULL;
new_gpu<float>(img, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_stride_backward << < dimGrid, dimBlock >> > (img, col, stride, XN, XC, FH, FW, OH, OW, XH, XW);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(col);
return img;
}
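//A plausible im2col-style convolution forward composed from the helpers above
//(a sketch assuming OH = (XH + 2*pad - FH)/stride + 1 and
//OW = (XW + 2*pad - FW)/stride + 1, with W_col the filter tensor reshaped to
//(XC*FH*FW, FN); the convolution layer that actually drives these calls may
//sequence them differently):
//
//	x = padding_gpu(x, pad, XN, XC, XH, XW);                            //(XN, XC, XH+2p, XW+2p)
//	float* col = stride_forward_gpu(x, stride, XN, XC, FH, FW, OH, OW,
//		XH + 2 * pad, XW + 2 * pad);                                    //(XN, XC, FH, FW, OH, OW)
//	col = transpose_gpu(col, XN, XC, FH, FW, OH, OW, 0, 4, 5, 1, 2, 3); //rows = XN*OH*OW, cols = XC*FH*FW
//	float* out = dot_gpu(col, W_col, XN*OH*OW, FN, XC*FH*FW);           //GEMM against the filters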
//dim=6
float* transpose(float* x,
int _dim0, int _dim1, int _dim2, int _dim3, int _dim4, int _dim5,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3, int idx_new_dim4, int idx_new_dim5) {
int old_dims[6] = { _dim0, _dim1, _dim2, _dim3, _dim4, _dim5 };
int new_dims[6] = { 0 };
new_dims[0] = old_dims[idx_new_dim0];
new_dims[1] = old_dims[idx_new_dim1];
new_dims[2] = old_dims[idx_new_dim2];
new_dims[3] = old_dims[idx_new_dim3];
new_dims[4] = old_dims[idx_new_dim4];
new_dims[5] = old_dims[idx_new_dim5];
int i = 0, j = 0, k = 0, l = 0, m = 0, n = 0;
int* old_idx[6] = { &i, &j, &k, &l, &m, &n };
int* i_new = old_idx[idx_new_dim0];
int* j_new = old_idx[idx_new_dim1];
int* k_new = old_idx[idx_new_dim2];
int* l_new = old_idx[idx_new_dim3];
int* m_new = old_idx[idx_new_dim4];
int* n_new = old_idx[idx_new_dim5];
int buffer = _dim0*_dim1*_dim2*_dim3*_dim4*_dim5;
float* x_transpose = NULL;
new_cpu<float>(x_transpose, buffer);
int idx, idx_transpose;
for (i = 0; i < _dim0; i++) {
for (j = 0; j < _dim1; j++) {
for (k = 0; k < _dim2; k++) {
for (l = 0; l < _dim3; l++) {
for (m = 0; m < _dim4; m++) {
for (n = 0; n < _dim5; n++) {
idx_transpose = (*i_new) * (new_dims[1] * new_dims[2] * new_dims[3] * new_dims[4] * new_dims[5])
+ (*j_new) * (new_dims[2] * new_dims[3] * new_dims[4] * new_dims[5])
+ (*k_new) * (new_dims[3] * new_dims[4] * new_dims[5])
+ (*l_new) * (new_dims[4] * new_dims[5])
+ (*m_new) * (new_dims[5])
+ (*n_new);
idx = i*(_dim1*_dim2*_dim3*_dim4*_dim5) + j*(_dim2*_dim3*_dim4*_dim5) + k*(_dim3*_dim4*_dim5) + l*(_dim4*_dim5) + m*(_dim5)+n;
x_transpose[idx_transpose] = x[idx];
}
}
}
}
}
}
delete_cpu<float>(x);
return x_transpose;
}
__global__ void kernel_transpose_6(float* x_transpose, float* x,
int _dim0, int _dim1, int _dim2, int _dim3, int _dim4, int _dim5,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3, int idx_new_dim4, int idx_new_dim5) {
int old_dims[6] = { _dim0, _dim1, _dim2, _dim3, _dim4, _dim5 };
int new_dims[6] = { 0 };
new_dims[0] = old_dims[idx_new_dim0];
new_dims[1] = old_dims[idx_new_dim1];
new_dims[2] = old_dims[idx_new_dim2];
new_dims[3] = old_dims[idx_new_dim3];
new_dims[4] = old_dims[idx_new_dim4];
new_dims[5] = old_dims[idx_new_dim5];
int i = 0, j = 0, k = 0, l = 0, m = 0, n = 0;
int* old_idx[6] = { &i, &j, &k, &l, &m, &n };
int* i_new = old_idx[idx_new_dim0];
int* j_new = old_idx[idx_new_dim1];
int* k_new = old_idx[idx_new_dim2];
int* l_new = old_idx[idx_new_dim3];
int* m_new = old_idx[idx_new_dim4];
int* n_new = old_idx[idx_new_dim5];
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = _dim0*_dim1*_dim2*_dim3*_dim4*_dim5;
int idx_transpose;
int idx;
while (tid < N)
{
idx6d(tid, _dim0, _dim1, _dim2, _dim3, _dim4, _dim5, i, j, k, l, m, n);
idx_transpose = (*i_new) * (new_dims[1] * new_dims[2] * new_dims[3] * new_dims[4] * new_dims[5])
+ (*j_new) * (new_dims[2] * new_dims[3] * new_dims[4] * new_dims[5])
+ (*k_new) * (new_dims[3] * new_dims[4] * new_dims[5])
+ (*l_new) * (new_dims[4] * new_dims[5])
+ (*m_new) * (new_dims[5])
+ (*n_new);
idx = i*(_dim1*_dim2*_dim3*_dim4*_dim5) + j*(_dim2*_dim3*_dim4*_dim5) + k*(_dim3*_dim4*_dim5) + l*(_dim4*_dim5) + m*(_dim5)+n;
x_transpose[idx_transpose] = x[idx];
tid += gridDim.x *blockDim.x;
}
}
float* transpose_gpu(float* x,
int _dim0, int _dim1, int _dim2, int _dim3, int _dim4, int _dim5,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3, int idx_new_dim4, int idx_new_dim5) {
int buffer = _dim0*_dim1*_dim2*_dim3*_dim4*_dim5;
float* x_transpose = NULL;
new_gpu<float>(x_transpose, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_transpose_6 << < dimGrid, dimBlock >> > (x_transpose, x,
_dim0, _dim1, _dim2, _dim3, _dim4, _dim5,
idx_new_dim0, idx_new_dim1, idx_new_dim2, idx_new_dim3, idx_new_dim4, idx_new_dim5);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(x);
return x_transpose;
}
//dim=4
float* transpose(float* x,
int _dim0, int _dim1, int _dim2, int _dim3,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3) {
int old_dims[4] = { _dim0, _dim1, _dim2, _dim3 };
int new_dims[4] = { 0 };
new_dims[0] = old_dims[idx_new_dim0];
new_dims[1] = old_dims[idx_new_dim1];
new_dims[2] = old_dims[idx_new_dim2];
new_dims[3] = old_dims[idx_new_dim3];
int i = 0, j = 0, k = 0, l = 0;
int* old_idx[4] = { &i, &j, &k, &l };
int* i_new = old_idx[idx_new_dim0];
int* j_new = old_idx[idx_new_dim1];
int* k_new = old_idx[idx_new_dim2];
int* l_new = old_idx[idx_new_dim3];
int buffer = _dim0*_dim1*_dim2*_dim3;
float* x_transpose = NULL;
new_cpu<float>(x_transpose, buffer);
int idx, idx_transpose;
for (i = 0; i < _dim0; i++) {
for (j = 0; j < _dim1; j++) {
for (k = 0; k < _dim2; k++) {
for (l = 0; l < _dim3; l++) {
idx_transpose = (*i_new) * (new_dims[1] * new_dims[2] * new_dims[3])
+ (*j_new) * (new_dims[2] * new_dims[3])
+ (*k_new) * (new_dims[3])
+ (*l_new);
idx = i*(_dim1*_dim2*_dim3) + j*(_dim2*_dim3) + k*(_dim3)+l;
x_transpose[idx_transpose] = x[idx];
}
}
}
}
delete_cpu<float>(x);
return x_transpose;
}
__global__ void kernel_transpose_4(float* x_transpose, float* x,
int _dim0, int _dim1, int _dim2, int _dim3,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3) {
	int old_dims[4] = { _dim0, _dim1, _dim2, _dim3 };
	int new_dims[4] = { 0 };
new_dims[0] = old_dims[idx_new_dim0];
new_dims[1] = old_dims[idx_new_dim1];
new_dims[2] = old_dims[idx_new_dim2];
new_dims[3] = old_dims[idx_new_dim3];
int i = 0, j = 0, k = 0, l = 0;
	int* old_idx[4] = { &i, &j, &k, &l };
int* i_new = old_idx[idx_new_dim0];
int* j_new = old_idx[idx_new_dim1];
int* k_new = old_idx[idx_new_dim2];
int* l_new = old_idx[idx_new_dim3];
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = _dim0*_dim1*_dim2*_dim3;
int idx_transpose;
int idx;
while (tid < N)
{
idx4d(tid, _dim0, _dim1, _dim2, _dim3, i, j, k, l);
idx_transpose = (*i_new) * (new_dims[1] * new_dims[2] * new_dims[3])
+ (*j_new) * (new_dims[2] * new_dims[3])
+ (*k_new) * (new_dims[3])
+ (*l_new);
idx = i*(_dim1*_dim2*_dim3) + j*(_dim2*_dim3) + k*(_dim3)+l;
x_transpose[idx_transpose] = x[idx];
tid += gridDim.x *blockDim.x;
}
}
float* transpose_gpu(float* x,
int _dim0, int _dim1, int _dim2, int _dim3,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3) {
int buffer = _dim0*_dim1*_dim2*_dim3;
float* x_transpose = NULL;
new_gpu<float>(x_transpose, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_transpose_4 << < dimGrid, dimBlock >> > (x_transpose, x,
_dim0, _dim1, _dim2, _dim3,
idx_new_dim0, idx_new_dim1, idx_new_dim2, idx_new_dim3);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(x);
return x_transpose;
}
//dim=2
float* transpose(float* x,
int _dim0, int _dim1) {
int buffer = _dim0*_dim1;
float* x_transpose = NULL;
new_cpu<float>(x_transpose, buffer);
int idx, idx_transpose;
for (int i = 0; i < _dim0; i++) {
for (int j = 0; j < _dim1; j++) {
idx = i*_dim1 + j;
idx_transpose = j*_dim0 + i;
x_transpose[idx_transpose] = x[idx];
}
}
delete_cpu<float>(x);
return x_transpose;
}
__global__ void kernel_transpose_2(float* x_transpose, float* x,
int _dim0, int _dim1) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= _dim0 || j >= _dim1) return;
int idx, idx_transpose;
idx = i*_dim1 + j;
idx_transpose = j*_dim0 + i;
x_transpose[idx_transpose] = x[idx];
}
float* transpose_gpu(float* x,
int _dim0, int _dim1) {
int buffer = _dim0*_dim1;
float* x_transpose = NULL;
new_gpu<float>(x_transpose, buffer);
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((_dim0 + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (_dim1 + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
kernel_transpose_2 << < dimGrid, dimBlock >> > (x_transpose, x, _dim0, _dim1);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(x);
return x_transpose;
}
float* dot(float* A, float* B,
int r, int c, int n) {
int buffer = r*c;
float* out = NULL;
new_cpu<float>(out, buffer);
float temp;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
temp = 0.0;
for (int k = 0; k < n; k++) {
temp += A[i*n + k] * B[k*c + j];
}
out[i*c + j] = temp;
}
}
return out;
}
__global__ void kernel_dot(float* out, float* A, float* B,
int r, int c, int n) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = r*c;
int i, j;
float temp, A_val, B_val;
while (tid < N)
{
temp = 0.0;
A_val = 0.0;
B_val = 0.0;
idx2d(tid, r, c, i, j);
for (int k = 0; k < n; k++) {
A_val = A[i*n + k];
B_val = B[k*c + j];
temp += A_val*B_val;
}
out[i*c + j] = temp;
tid += gridDim.x*blockDim.x;
}
}
float* dot_gpu(float* A, float* B,
int r, int c, int n) {
int buffer = r*c;
float* out = NULL;
new_gpu<float>(out, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_dot << < dimGrid, dimBlock >> > (out, A, B, r, c, n);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
return out;
}
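//Usage sketch for an affine (fully connected) forward pass (names illustrative;
//sum_forward_gpu below adds the bias row-wise):
//
//	float* out = dot_gpu(x, W, batch, out_dim, in_dim); //out = x . W
//	sum_forward_gpu(out, b, batch, out_dim);            //out += b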
void _dot(float* out, float* A, float* B,
int r, int c, int n) {
float temp;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
temp = 0.0;
for (int k = 0; k < n; k++) {
temp += A[i*n + k] * B[k*c + j];
}
out[i*c + j] = temp;
}
}
}
void _dot_gpu(float* out, float* A, float* B,
int r, int c, int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_dot << < dimGrid, dimBlock >> > (out, A, B, r, c, n);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void sum_forward(float* x, float* b,
int r, int c) {
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
x[i*c + j] += b[j];
}
}
}
__global__ void kernel_sum_forward(float* x, float* b,
int r, int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r*c;
int i = 0, j = 0;
while (tid < N)
{
idx2d(tid, r, c, i, j);
x[i*c + j] += b[j];
tid += gridDim.x*blockDim.x;
}
}
void sum_forward_gpu(float* x, float* b,
int r, int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_sum_forward << < dimGrid, dimBlock >> > (x, b, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void sum_backward(float* db, float* dout,
int r, int c) {
memset(db, 0, c * sizeof(float));
for (int j = 0; j < c; j++) {
for (int i = 0; i < r; i++) {
db[j] += dout[i*c + j];
}
}
}
__global__ void kernel_sum_backward(float* db, float* dout,
int r, int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = c;
while (tid < N)
{
for (int i = 0; i < r; i++) {
db[tid] += dout[i*c + tid];
}
tid += gridDim.x*blockDim.x;
}
}
void sum_backward_gpu(float* db, float* dout,
int r, int c) {
hipMemset(db, 0, c * sizeof(float));
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_sum_backward << < dimGrid, dimBlock >> > (db, dout, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
template <unsigned int blockSize>
__device__ void warpReduce(volatile float* sdata, int tid)
{
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
__global__ void kernel_sum_backward_opt1(float* sum, float* dout, int r, int c) {
__shared__ float sdata[(BLOCK_SIZE_opt / 2)];
unsigned int tid = threadIdx.x;
unsigned int i = (blockDim.x * 2) * blockIdx.x + threadIdx.x;
	//if (i >= r) return; //each thread also loads row i + blockDim.x, so r is assumed to be a multiple of BLOCK_SIZE_opt
for (int j = 0; j < c; j++) {
sdata[tid] = dout[i*c + j] + dout[(i + blockDim.x)*c + j];
__syncthreads();
if (blockDim.x >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
if (tid < 32) warpReduce<BLOCK_SIZE_opt / 2>(sdata, tid);
if (tid == 0) sum[blockIdx.x*c + j] = sdata[0];
__syncthreads();
}
}
__global__ void Kernel_Sum_backward_opt2(float* db, float* sum, int r_sum, int c) {
unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= c) return;
float temp = 0;
for (int i = 0; i < r_sum; i++) {
temp += sum[i*c + j];
}
db[j] = temp;
}
void sum_backward_gpu(float* db, float* dout,
int r, int c, bool use_sharedMemory)
{
int buffer = (r + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt * c;
float* sum = NULL;
new_gpu<float>(sum, buffer);
dim3 dimBlock1(BLOCK_SIZE_opt / 2); //halve the number of threads
dim3 dimGrid1((r + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt);
kernel_sum_backward_opt1 << < dimGrid1, dimBlock1 >> > (sum, dout, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
int r_sum = buffer / c;
dim3 dimBlock2(BLOCK_SIZE_opt);
dim3 dimGrid2((c + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt);
Kernel_Sum_backward_opt2 << < dimGrid2, dimBlock2 >> > (db, sum, r_sum, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(sum);
}
float* max_poolingForward(int*& argMax, float* col, //argMax is allocated here, so it is taken by reference
	int r, int c)
{
int buffer = r;
float* out = NULL;
new_cpu<float>(out, buffer);
new_cpu<int>(argMax, buffer);
float temp;
int idx;
for (int i = 0; i < r; i++) {
idx = 0;
temp = col[i*c + 0];
for (int j = 1; j < c; j++) {
if (col[i*c + j] > temp) {
temp = col[i*c + j];
idx = j;
}
}
argMax[i] = idx;
out[i] = temp;
}
delete_cpu<float>(col);
return out;
}
float* max_poolingForward(float* col,
int r, int c)
{
int buffer = r;
float* out = NULL;
new_cpu<float>(out, buffer);
float temp;
for (int i = 0; i < r; i++) {
temp = col[i*c + 0];
for (int j = 1; j < c; j++) {
if (col[i*c + j] > temp) {
temp = col[i*c + j];
}
}
out[i] = temp;
}
delete_cpu<float>(col);
return out;
}
float* avg_poolingForward(float* col,
int r, int c)
{
int buffer = r;
float* out = NULL;
new_cpu<float>(out, buffer);
float sum;
for (int i = 0; i < r; i++) {
sum = 0.0;
for (int j = 0; j < c; j++) {
sum += col[i*c + j];
}
out[i] = sum / c;
}
delete_cpu<float>(col);
return out;
}
float* max_poolingBackward(int* argMax, float* dout,
int r, int c) {
int buffer = r*c;
float* dcol = NULL;
new_cpu<float>(dcol, buffer);
for (int i = 0; i < r; i++) {
dcol[i*c + argMax[i]] = dout[i];
}
delete_cpu<float>(dout);
delete_cpu<int>(argMax);
return dcol;
}
float* avg_poolingBackward(float* dout,
int r, int c)
{
int buffer = r*c;
float* dcol = NULL;
new_cpu<float>(dcol, buffer);
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
dcol[i*c + j] = dout[i] / c;
}
}
delete_cpu<float>(dout);
return dcol;
}
__global__ void kernel_max_poolingForward_training(float* out, int* argMax, float* col,
int r, int c) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
int idx;
float temp;
while (i < N)
{
temp = col[i*c + 0];
idx = 0;
for (int j = 1; j < c; j++) {
if (col[i*c + j] > temp) {
temp = col[i*c + j];
idx = j;
}
}
argMax[i] = idx;
out[i] = temp;
i += gridDim.x*blockDim.x;
}
}
float* max_poolingForward_gpu(int*& argMax, float* col, //argMax is allocated here, so it is taken by reference
	int r, int c) {
int buffer = r;
float* out = NULL;
new_gpu<float>(out, buffer);
new_gpu<int>(argMax, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_max_poolingForward_training << < dimGrid, dimBlock >> > (out, argMax, col, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(col);
return out;
}
__global__ void kernel_max_poolingForward_inference(float* out, float* col,
int r, int c) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
float temp;
while (i < N)
{
temp = col[i*c + 0];
for (int j = 1; j < c; j++) {
if (col[i*c + j] > temp) {
temp = col[i*c + j];
}
}
out[i] = temp;
i += gridDim.x*blockDim.x;
}
}
float* max_poolingForward_gpu(float* col,
int r, int c) {
int buffer = r;
float* out = NULL;
new_gpu<float>(out, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_max_poolingForward_inference << < dimGrid, dimBlock >> > (out, col, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(col);
return out;
}
__global__ void kernel_avg_poolingForward(float* out, float* col,
int r, int c) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
float sum;
while (i < N)
{
sum = 0.0;
for (int j = 0; j < c; j++) {
sum += col[i*c + j];
}
out[i] = sum / c;
i += gridDim.x*blockDim.x;
}
}
float* avg_poolingForward_gpu(float* col,
int r, int c) {
int buffer = r;
float* out = NULL;
new_gpu<float>(out, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_avg_poolingForward << < dimGrid, dimBlock >> > (out, col, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(col);
return out;
}
__global__ void kernel_max_poolingBackward(float* dcol, int* argMax, float* dout,
int r, int c) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = r*c;
int i, j;
while (tid < N)
{
idx2d(tid, r, c, i, j);
dcol[i*c + j] = 0;
dcol[i*c + (argMax[i])] = dout[i];
tid += gridDim.x*blockDim.x;
}
}
float* max_poolingBackward_gpu(int* argMax, float* dout,
int r, int c) {
int buffer = r*c;
float* dcol = NULL;
new_gpu<float>(dcol, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_max_poolingBackward << < dimGrid, dimBlock >> > (dcol, argMax, dout, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(dout);
delete_gpu<int>(argMax);
return dcol;
}
__global__ void kernel_avg_poolingBackward(float* dcol, float* dout,
int r, int c) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = r*c;
int i, j;
while (tid < N)
{
idx2d(tid, r, c, i, j);
dcol[i*c + j] = dout[i] / c;
tid += gridDim.x*blockDim.x;
}
}
float* avg_poolingBackward_gpu(float* dout,
int r, int c) {
int buffer = r*c;
float* dcol = NULL;
new_gpu<float>(dcol, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_avg_poolingBackward << < dimGrid, dimBlock >> > (dcol, dout, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<float>(dout);
return dcol;
}
__global__ void kernel_reluForward_training(float* x, int* index, int size, float negative_slope) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
		if (x[tid] > 0) index[tid] = 1;
		else {
			index[tid] = 0;	// the mask must be written on both branches: device memory is not zero-initialized
			x[tid] *= negative_slope;
		}
tid += gridDim.x*blockDim.x;
}
}
__global__ void kernel_reluForward_inference(float* x, int size, float negative_slope) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
if (x[tid] <= 0) x[tid] *= negative_slope;
tid += gridDim.x*blockDim.x;
}
}
void reluForward_gpu(float* x, int*& index, int size, float negative_slope) {
	// index is taken by reference so the device-side mask allocated below
	// reaches the caller for use in reluBackward_gpu (the declaration in the
	// matching header is assumed to agree)
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
int buffer = size;
new_gpu<int>(index, buffer);
kernel_reluForward_training << < dimGrid, dimBlock >> > (x, index, size, negative_slope);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
void reluForward_gpu(float* x, int size, float negative_slope) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_reluForward_inference << < dimGrid, dimBlock >> > (x, size, negative_slope);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
__global__ void kernel_reluBackward(float* dout, int* index, int size, float negative_slope) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
if (!index[tid]) dout[tid] *= negative_slope;
tid += gridDim.x*blockDim.x;
}
}
void reluBackward_gpu(float* dout, int* index, int size, float negative_slope) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_reluBackward << < dimGrid, dimBlock >> > (dout, index, size, negative_slope);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
delete_gpu<int>(index);
}
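// mask convention: index[tid] == 1 marks inputs that passed through unscaled, so
// the backward pass scales only the masked-out (non-positive) positions by
// negative_slope; with negative_slope == 0 this reduces to plain ReLU.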
void softmax(float* x,
	int r, int c) {
	float temp1, temp2;
	for (int i = 0; i < r; i++) {
		// seed the running max with the first element so rows whose entries
		// are all negative are still stabilized correctly
		temp1 = x[i*c + 0];
		temp2 = 0.;
		for (int j = 1; j < c; j++)
		{
			temp1 = max(x[i*c + j], temp1);
		}
		for (int j = 0; j < c; j++)
		{
			x[i*c + j] = expf(x[i*c + j] - temp1);
			temp2 += x[i*c + j];
		}
		for (int j = 0; j < c; j++) x[i*c + j] /= temp2;
	}
}
__global__ void kernel_softmax(float* x, int r, int c) {
	unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= r) return;
	float temp1 = x[i*c + 0], temp2 = 0.;	// seed the max with the first element
	for (int j = 1; j < c; j++) temp1 = max(x[i*c + j], temp1);
	for (int j = 0; j < c; j++) {
		x[i*c + j] = expf(x[i*c + j] - temp1);
		temp2 += x[i*c + j];
	}
	for (int j = 0; j < c; j++) x[i*c + j] /= temp2;
}
void softmax_gpu(float* x,
int r, int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((r + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
		cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'softmax_gpu'!" << endl;
}
kernel_softmax << < dimGrid, dimBlock >> > (x, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
}
float CEE(float* x, int* t,
int r, int c) {
float temp = 0;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
if (t[i*c + j] == 1) { //one-hot encoding
temp += log(x[i*c + j] + 1e-7);
				break;	// one-hot: at most one match per row, so stop scanning
}
}
}
temp /= -r;
return temp;
}
__global__ void kernel_CEE(float* x, int* t, float* loss,
int r, int c) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
int N = r;
float temp;
while (i < N)
{
for (int j = 0; j < c; j++) {
if (t[i*c + j] == 1) {
temp = logf(x[i*c + j] + 1e-7);
atomicAdd(loss, temp);
				break;	// one-hot: at most one match per row
}
}
i += gridDim.x*blockDim.x;
}
}
float CEE_gpu(float* x, int* t, float* loss,
int r, int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
hipMemset(loss, 0, sizeof(float));
kernel_CEE << < dimGrid, dimBlock >> > (x, t, loss, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
float _loss = 0;
hipMemcpy(&_loss, loss, sizeof(float), hipMemcpyDeviceToHost);
_loss /= -r;
return _loss;
}
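// for one-hot targets this computes the mean cross-entropy
//   loss = -(1/r) * sum_i log(x[i, t_i] + 1e-7)
// where the 1e-7 term guards log(0); the kernel accumulates the sum with
// atomicAdd and the host divides by -r afterwards.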
__global__ void kernel_softmaxBackward(float* dx, float* y, int* t,
int r, int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r*c;
while (tid < N)
{
dx[tid] = (y[tid] - t[tid]) / r;
tid += gridDim.x*blockDim.x;
}
}
float* softmaxBackward_gpu(float* y, int* t,
int r, int c) {
int buffer = r*c;
float* dx = NULL;
new_gpu<float>(dx, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_softmaxBackward << < dimGrid, dimBlock >> > (dx, y, t, r, c);
hipDeviceSynchronize();
gpuErrchk(hipGetLastError());
return dx;
} | 80fc352bb7bcb0b44fe43b0df68120ddd6c54ea9.cu | ///function for deep learning calculation
#include "Function.cuh"
cudaError_t cudaStatus;
void Check(int* output_shape, float* dev_out)
{
int size_result = 1;
for (int i = 0; i < 4; i++) size_result *= output_shape[i];
cout << "size: " << size_result << endl;
float* host_result = new float[size_result];
memset(host_result, 0, size_result * sizeof(float));
cudaMemcpy(host_result, dev_out, size_result * sizeof(float), cudaMemcpyDeviceToHost);
float sum = 0;
for (int i = 0; i < size_result; i++) sum += host_result[i];
cout << "average of output: " << sum / (float)size_result << endl;
int N = output_shape[0]; cout << "N=" << N << endl;
int C = output_shape[1]; cout << "C=" << C << endl;
int H = output_shape[2]; cout << "H=" << H << endl;
int W = output_shape[3]; cout << "W=" << W << endl;
float sum1 = 0;
float tmp1;
for (int l = 0; l < W; l++) {
tmp1 = host_result[0 * (C*H*W) + 0 * (H*W) + 0 * (W)+l];
cout << "value of output[0,0,0,:]: " << tmp1 << endl;
sum1 += tmp1;
}
cout << "average of output[0,0,0,:]: " << sum1 / (float)W << endl;
delete[] host_result;
}
//index change
__device__ __host__ void idx2d(int tid,
int ni, int nj,
int& i, int& j) {
i = tid / nj;
j = tid % nj;
}
__device__ __host__ void idx4d(int tid,
int ni, int nj, int nk, int nl,
int& i, int& j, int& k, int& l) {
i = tid / (nj*nk*nl);
tid = tid - (i*(nj*nk*nl));
j = tid / (nk*nl);
tid = tid - (j*(nk*nl));
k = tid / (nl);
l = tid % (nl);
}
__device__ __host__ void idx6d(int tid,
int ni, int nj, int nk, int nl, int nm, int nn,
int& i, int& j, int& k, int& l, int& m, int& n) {
i = tid / (nj*nk*nl*nm*nn);
tid = tid - (i*(nj*nk*nl*nm*nn));
j = tid / (nk*nl*nm*nn);
tid = tid - (j*(nk*nl*nm*nn));
k = tid / (nl*nm*nn);
tid = tid - (k*(nl*nm*nn));
l = tid / (nm*nn);
tid = tid - (l*(nm*nn));
m = tid / (nn);
n = tid % (nn);
}
void Dot(float* C, float* A, float* B, const int r, const int c, const int n) {
float temp;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
temp = 0.0;
for (int k = 0; k < n; k++) {
temp += A[i*n + k] * B[k*c + j];
}
C[i*c + j] = temp;
}
}
}
__global__ void Kernel_Dot(float* C, float* A, float* B,
const int r, const int c, const int n) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = r*c;
int i, j;
float temp, A_val, B_val;
while (tid < N)
{
temp = 0.0;
A_val = 0.0;
B_val = 0.0;
idx2d(tid, r, c, i, j);
for (int k = 0; k < n; k++) {
A_val = A[i*n + k];
B_val = B[k*c + j];
temp += A_val*B_val;
}
C[i*c + j] = temp;
tid += gridDim.x*blockDim.x;
}
}
void Dot_gpu(float* dev_C, float* dev_A, float* dev_B,
const int r, const int c, const int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Dot << < dimGrid, dimBlock >> > (dev_C, dev_A, dev_B, r, c, n);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
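// host-side usage sketch (illustrative; assumes new_gpu/delete_gpu manage device
// buffers through their reference parameter) for C = A(r x n) * B(n x c):
//   float *dA = NULL, *dB = NULL, *dC = NULL;
//   new_gpu<float>(dA, r*n); new_gpu<float>(dB, n*c); new_gpu<float>(dC, r*c);
//   cudaMemcpy(dA, hostA, r*n*sizeof(float), cudaMemcpyHostToDevice);
//   cudaMemcpy(dB, hostB, n*c*sizeof(float), cudaMemcpyHostToDevice);
//   Dot_gpu(dC, dA, dB, r, c, n);
//   cudaMemcpy(hostC, dC, r*c*sizeof(float), cudaMemcpyDeviceToHost);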
__global__ void Kernel_Dot_coalescing1(float* C, float* A, float* B,
const int r, const int c, const int n) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = r*c;
int i, j;
float temp, A_val, B_val;
while (tid < N)
{
temp = 0.0;
A_val = 0.0;
B_val = 0.0;
idx2d(tid, r, c, i, j);
for (int k = 0; k < n; k++) {
A_val = A[k*r + i];
B_val = B[k*c + j];
temp += A_val*B_val;
}
C[i*c + j] = temp;
tid += gridDim.x*blockDim.x;
}
}
void Dot_coalescing1_gpu(float* dev_c, float* dev_a, float* dev_b,
const int r, const int c, const int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Dot_coalescing1 << < dimGrid, dimBlock >> > (dev_c, dev_a, dev_b, r, c, n);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Dot_coalescing2(float* C, float* A, float* B,
const int r, const int c, const int n) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = r*c;
int i, j;
float temp, A_val, B_val;
while (tid < N)
{
temp = 0.0;
A_val = 0.0;
B_val = 0.0;
idx2d(tid, r, c, i, j);
for (int k = 0; k < n; k++) {
A_val = A[i*n + k];
B_val = B[j*n + k];
temp += A_val*B_val;
}
C[i*c + j] = temp;
tid += gridDim.x*blockDim.x;
}
}
void Dot_coalescing2_gpu(float* dev_c, float* dev_a, float* dev_b,
const int r, const int c, const int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Dot_coalescing2 << < dimGrid, dimBlock >> > (dev_c, dev_a, dev_b, r, c, n);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Dot_reduction1(float* dev_a, float* dev_b,
	const int r, const int c, const int n,
	float* reduction) {
	__shared__ float shared[BLOCK_SIZE];
	unsigned int k = blockDim.x*blockIdx.x + threadIdx.x;
	unsigned int sharedIdx = threadIdx.x;
	// no early return here: every thread of the block must reach the
	// __syncthreads() calls below, so out-of-range threads just contribute 0
	bool active = (k < n);
	int m;
	for (int i = 0; i < r; i++) {
		for (int j = 0; j < c; j++) {
			shared[sharedIdx] = active ? dev_a[i*n + k] * dev_b[k*c + j] : 0.0f;
			__syncthreads();
			m = blockDim.x / 2;
			while (m != 0) {
				if (sharedIdx < m) shared[sharedIdx] += shared[sharedIdx + m];
				__syncthreads();
				m /= 2;
			}
			if (sharedIdx == 0) reduction[i*(c*gridDim.x) + j*(gridDim.x) + blockIdx.x] = shared[0];
			__syncthreads();
		}
	}
}
__global__ void Kernel_Dot_reduction2(float* dev_c, float* reduction, int r, const int c, const int n,
int size_block) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= r || j >= c) return;
float temp = 0;
for (int k = 0; k < size_block; k++) {
temp += reduction[i*(c*size_block) + j*(size_block)+k];
}
dev_c[i*c + j] = temp;
}
void Dot_reduction_gpu(float* dev_c, float* dev_a, float* dev_b,
const int r, const int c, const int n,
float* reduction)
{
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
Kernel_Dot_reduction1 << < dimGrid1, dimBlock1 >> > (dev_a, dev_b, r, c, n, reduction);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
int size_block = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimBlock2(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid2((r + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (c + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
Kernel_Dot_reduction2 << < dimGrid2, dimBlock2 >> > (dev_c, reduction, r, c, n, size_block);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Dot_atomic(float* dev_c, float* dev_a, float* dev_b,
const int r, const int c, const int n) {
unsigned int k = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = n;
float temp, A_val, B_val;
while (k < N)
{
for (int i = 0; i < r; i++)
{
for (int j = 0; j < c; j++)
{
A_val = dev_a[i*n + k];
B_val = dev_b[k*c + j];
temp = A_val * B_val;
atomicAdd(&(dev_c[i*c + j]), temp);
}
}
k += gridDim.x*blockDim.x;
}
}
void Dot_atomic_gpu(float* dev_C, float* dev_A, float* dev_B,
const int r, const int c, const int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Dot_atomic << < dimGrid, dimBlock >> > (dev_C, dev_A, dev_B, r, c, n);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
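// the GPU GEMM variants trade memory-access patterns against synchronization
// cost: Kernel_Dot computes one output element per thread (grid-stride); the
// _coalescing variants expect A (variant 1) or B (variant 2) stored transposed
// so the inner-loop loads are coalesced; the _reduction pair splits the inner
// product over a block, reduces it in shared memory, and sums the per-block
// partials in a second kernel; _atomic lets each thread handle one k index and
// accumulate its partial products with atomicAdd (simple, but contended).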
void Sum(char txt, float* A, float* B, const int r, const int c) {
switch (txt)
{
case 'f':
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
A[i*c + j] += B[j];
}
}
break;
case 'b':
for (int j = 0; j < c; j++) {
A[j] = 0.0;
}
for (int j = 0; j < c; j++) {
for (int i = 0; i < r; i++) {
A[j] += B[i*c + j];
}
}
break;
default:
cout << "Error for 'txt' variable!" << endl;
break;
}
}
__global__ void Kernel_Sum_forward(float* dev_A, float* dev_B, const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r*c;
int i, j;
while (tid < N)
{
idx2d(tid, r, c, i, j);
dev_A[i*c + j] += dev_B[j];
tid += gridDim.x*blockDim.x;
}
}
__global__ void Kernel_Sum_backward(float* dev_A, float* dev_B, const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = c;
int j;
while (tid < N)
{
j = tid;
dev_A[j] = 0.0;
for (int i = 0; i < r; i++) {
dev_A[j] += dev_B[i*c + j];
}
tid += gridDim.x*blockDim.x;
}
}
template <unsigned int blockSize>
__device__ void warpReduce(volatile float* sdata, int tid)
{
	// classic unrolled warp reduction; the volatile trick relies on implicit
	// warp-synchronous execution (pre-Volta) - newer architectures would need
	// __syncwarp() between the steps
	if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
	if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
	if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
	if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
	if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
	if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
__global__ void Kernel_Sum_backward_opt(float* dev_sum, float* dev_B, const int r, const int c) {
	__shared__ float sdata[(BLOCK_SIZE_opt / 2)];
	unsigned int tid = threadIdx.x;
	unsigned int i = (blockDim.x * 2) * blockIdx.x + threadIdx.x;
	for (int j = 0; j < c; j++) {
		// each thread adds two rows on load; rows beyond r contribute 0, so
		// the kernel stays correct when r is not a multiple of BLOCK_SIZE_opt
		float v1 = (i < r) ? dev_B[i*c + j] : 0.0f;
		float v2 = (i + blockDim.x < r) ? dev_B[(i + blockDim.x)*c + j] : 0.0f;
		sdata[tid] = v1 + v2;
		__syncthreads();
		if (blockDim.x >= 512) {
			if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
		}
		if (blockDim.x >= 256) {
			if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
		}
		if (blockDim.x >= 128) {
			if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
		}
		if (tid < 32) warpReduce<BLOCK_SIZE_opt / 2>(sdata, tid);
		if (tid == 0) dev_sum[blockIdx.x*c + j] = sdata[0];
		__syncthreads();
	}
}
__global__ void Kernel_Sum_backward_opt_sum(float* dev_A, float* dev_sum, int r_sum, const int c) {
unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= c) return;
float temp = 0;
for (int i = 0; i < r_sum; i++) {
temp += dev_sum[i*c + j];
}
dev_A[j] = temp;
}
__global__ void Kernel_Sum_backward1(float* dev_B, float* dev_partial, const int r, const int c) {
	__shared__ float cache[BLOCK_SIZE];
	unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned int cacheIndex = threadIdx.x;
	// no early return: every thread must participate in the __syncthreads()
	// below, so out-of-range threads load 0 instead of bailing out
	bool active = (i < r);
	for (int j = 0; j < c; j++) {
		cache[cacheIndex] = active ? dev_B[i*c + j] : 0.0f;
		__syncthreads();
		int k = blockDim.x / 2;
		while (k != 0) {
			if (cacheIndex < k) cache[cacheIndex] += cache[cacheIndex + k];
			__syncthreads();
			k /= 2;
		}
		if (cacheIndex == 0) dev_partial[blockIdx.x*c + j] = cache[0];
		__syncthreads();
	}
}
__global__ void Kernel_Sum_backward2(float* dev_A, float* dev_partial, const int r, const int c,
int size_partial) {
unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= c) return;
int i;
float temp = 0;
for (i = 0; i < size_partial; i++) {
temp += dev_partial[i*c + j];
}
dev_A[j] = temp;
}
void Sum_gpu(char txt, float* dev_A, float* dev_B, const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
switch (txt)
{
case 'f':
Kernel_Sum_forward << < dimGrid, dimBlock >> > (dev_A, dev_B, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
break;
case 'b':
Kernel_Sum_backward << < dimGrid, dimBlock >> > (dev_A, dev_B, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
break;
default:
cout << "Error for 'txt' variable!" << endl;
break;
}
}
void Sum_gpu(char txt, float* dev_A, float* dev_B, const int r, const int c,
float* dev_sum)
{
if (txt != 'b')
cout << "(Sum_gpu) this function should be in backward" << endl;
dim3 dimBlock(BLOCK_SIZE_opt / 2); //halve the number of threads
dim3 dimGrid((r + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt);
	Kernel_Sum_backward_opt << < dimGrid, dimBlock >> > (dev_sum, dev_B, r, c);	// stage 1: per-block partial column sums into dev_sum, consumed below
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
int r_sum = (r + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt;
dim3 dimBlock_sum(BLOCK_SIZE_opt);
dim3 dimGrid_sum((c + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt);
Kernel_Sum_backward_opt_sum << < dimGrid_sum, dimBlock_sum >> > (dev_A, dev_sum, r_sum, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
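// two-stage column sum for the bias gradient: stage 1 (Kernel_Sum_backward_opt)
// writes one partial sum per block and column into dev_sum; stage 2
// (Kernel_Sum_backward_opt_sum) adds the (r + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt
// partials per column, matching the CPU Sum('b', ...) result.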
void Sum_gpu1(char txt, float* dev_A, float* dev_B, const int r, const int c,
float* dev_partial, int size_partial) {
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((r + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Sum_gpu'!" << endl;
}
Kernel_Sum_backward1 << < dimGrid2, dimBlock2 >> > (dev_B, dev_partial, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
dim3 dimBlock3(BLOCK_SIZE);
dim3 dimGrid3((c + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid3.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Sum_gpu'!" << endl;
}
Kernel_Sum_backward2 << < dimGrid3, dimBlock3 >> > (dev_A, dev_partial, r, c, size_partial);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
/*loss function*/
float MSE(float** x1, float** x2, const int r, const int c) {
float temp = 0.0;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
temp += pow(x1[i][j] - x2[i][j], 2);
}
}
temp /= 2.0*r;
return temp;
}
void Softmax(float* x, const int r, const int c) {
	float temp1, temp2;
	for (int i = 0; i < r; i++) {
		// seed the running max with the first element so all-negative rows
		// are stabilized correctly
		temp1 = x[i*c + 0];
		temp2 = 0.;
		for (int j = 1; j < c; j++)
		{
			temp1 = max(x[i*c + j], temp1);
		}
		for (int j = 0; j < c; j++)
		{
			x[i*c + j] = expf(x[i*c + j] - temp1);
			temp2 += x[i*c + j];
		}
		for (int j = 0; j < c; j++) x[i*c + j] /= temp2;
	}
}
void Softmax_seg(float* x, const int size_category, const int size_spatial_feature_map)
{
	int c = size_category;
	int size = size_spatial_feature_map;
	float temp1, temp2;
	for (int i = 0; i < size; i++) {
		temp1 = x[0 * size + i];	// max over categories, seeded with category 0
		temp2 = 0.;
		for (int j = 1; j < c; j++)
		{
			temp1 = max(x[j*size + i], temp1);
		}
		for (int j = 0; j < c; j++)
		{
			x[j*size + i] = expf(x[j*size + i] - temp1);
			temp2 += x[j*size + i];
		}
		for (int j = 0; j < c; j++) x[j*size + i] /= temp2;
	}
}
__global__ void Kernel_Softmax(float* dev_x, const int r, const int c) {
	unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= r) return;
	float temp1 = dev_x[i*c + 0], temp2 = 0.;	// seed the max with the first element
	for (int j = 1; j < c; j++) temp1 = max(dev_x[i*c + j], temp1);
	for (int j = 0; j < c; j++) {
		dev_x[i*c + j] = expf(dev_x[i*c + j] - temp1);
		temp2 += dev_x[i*c + j];
	}
	for (int j = 0; j < c; j++) dev_x[i*c + j] /= temp2;
}
void Softmax_gpu(float* dev_x, const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((r + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax_gpu'!" << endl;
}
Kernel_Softmax << < dimGrid, dimBlock >> > (dev_x, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Softmax_seg(float* dev_x, const int c, const int size) {
	unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
	int N = size;
	float temp;
	while (i < N)
	{
		// seed the max afresh for every pixel handled by this thread; reusing
		// the running value across grid-stride iterations would mix the
		// previous pixel's sum into the max
		temp = dev_x[0 * size + i];
		for (int j = 1; j < c; j++)
			temp = max(dev_x[j*size + i], temp);
		for (int j = 0; j < c; j++)
			dev_x[j*size + i] = expf(dev_x[j*size + i] - temp);
		temp = 0.0;
		for (int j = 0; j < c; j++)
			temp += dev_x[j*size + i];
		for (int j = 0; j < c; j++)
			dev_x[j*size + i] /= temp;
		i += gridDim.x*blockDim.x;
	}
}
void Softmax_seg_gpu(float* dev_x, const int size_category, const int size_spatial_feature_map) {
int size = size_spatial_feature_map;
int c = size_category;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Softmax_seg << < dimGrid, dimBlock >> > (dev_x, c, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Softmax4d(float* dev_x, int N, int C, int H, int W) {
	int tid = blockDim.x*blockIdx.x + threadIdx.x;
	int Max = H*W;
	if (tid >= Max) return;
	int i, j;
	idx2d(tid, H, W, i, j);
	float temp_max = dev_x[0 * (C*H*W) + 0 * (H*W) + i*(W)+j];	// seed the max with channel 0
	for (int n = 1; n < C; n++) temp_max = max(dev_x[0 * (C*H*W) + n*(H*W) + i*(W)+j], temp_max);
	float temp_sum = 0;
	for (int n = 0; n < C; n++) temp_sum += expf(dev_x[0 * (C*H*W) + n*(H*W) + i*(W)+j] - temp_max);
	for (int n = 0; n < C; n++) dev_x[0 * (C*H*W) + n*(H*W) + i*(W)+j] = expf(dev_x[0 * (C*H*W) + n*(H*W) + i*(W)+j] - temp_max) / temp_sum;
}
void Softmax4d_gpu(float* dev_x, int N, int C, int H, int W) {
	if (N != 1) // the batch size 'N' must be 1
{
cout << "the batch size 'N' must be 1! N=[" << N << "] at Softmax4d_gpu" << endl;
}
int size = H*W;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax4d_gpu'!" << endl;
}
Kernel_Softmax4d << < dimGrid, dimBlock >> > (dev_x, N, C, H, W);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Softmax_shared1(float* dev_x, const int XN, const int DN, float* dev_partialX4d, int size_partialX4d) {
	// pass 1 of 3: per-block maximum of dev_x, written to dev_partialX4d
	unsigned int cacheIdx = threadIdx.x;
	__shared__ float cache[BLOCK_SIZE];
	unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
	// out-of-range threads must not return early: every thread has to reach
	// the __syncthreads() below, so they load the neutral value 0 instead
	cache[cacheIdx] = (tid < DN) ? dev_x[tid] : 0;
	__syncthreads();
	int k = blockDim.x / 2;
	while (k != 0) {
		if (cacheIdx < k) cache[cacheIdx] = max(cache[cacheIdx], cache[cacheIdx + k]);
		__syncthreads();
		k /= 2;
	}
	if (cacheIdx == 0) dev_partialX4d[blockIdx.x] = cache[0];
}
__global__ void Softmax_shared2(float* dev_x, const int XN, const int DN, float* dev_partialX4d, int size_partialX4d) {
	// pass 2 of 3: reduce the per-block maxima from pass 1 to the global max
	// (taking their maximum, not their sum, so expf is not driven into
	// underflow), exponentiate, then write per-block partial sums.
	// note: dev_partialX4d is read as maxima and rewritten as sums; blocks are
	// not synchronized, so a separate scratch buffer would be more robust.
	unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
	float mx = dev_partialX4d[0];
	for (int i = 1; i < size_partialX4d; i++)
	{
		mx = max(mx, dev_partialX4d[i]);
	}
	unsigned int cacheIdx = threadIdx.x;
	__shared__ float cache[BLOCK_SIZE];
	// out-of-range threads must not return early: they still have to reach
	// the __syncthreads() below, so they contribute 0 to the block sum
	if (tid < DN) dev_x[tid] = expf(dev_x[tid] - mx);
	cache[cacheIdx] = (tid < DN) ? dev_x[tid] : 0;
	__syncthreads();
	int k = blockDim.x / 2;
	while (k != 0) {
		if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
		__syncthreads();
		k /= 2;
	}
	if (cacheIdx == 0) dev_partialX4d[blockIdx.x] = cache[0];
}
__global__ void Softmax_shared3(float* dev_x, const int XN, const int DN, float* dev_partialX4d, int size_partialX4d) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= DN) return;
float sum = 0;
for (int i = 0; i < size_partialX4d; i++)
{
sum += dev_partialX4d[i];
}
dev_x[tid] /= sum;
}
void Softmax_gpu_shared(float* dev_x, const int XN, const int DN, float* dev_partialX4d, int size_partialX4d) {
if (XN != 1) // the batch size 'XN' must be 1
{
cout << "the batch size 'XN' must be 1! XN=[" << XN << ']' << endl;
}
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid1.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax_gpu_shared1'!" << endl;
}
Softmax_shared1 << < dimGrid1, dimBlock1 >> > (dev_x, XN, DN, dev_partialX4d, size_partialX4d);
gpuErrchk(cudaGetLastError());
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax_gpu_shared2'!" << endl;
}
Softmax_shared2 << < dimGrid2, dimBlock2 >> > (dev_x, XN, DN, dev_partialX4d, size_partialX4d);
gpuErrchk(cudaGetLastError());
dim3 dimBlock3(BLOCK_SIZE);
dim3 dimGrid3((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid3.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax_gpu_shared3'!" << endl;
}
Softmax_shared3 << < dimGrid3, dimBlock3 >> > (dev_x, XN, DN, dev_partialX4d, size_partialX4d);
gpuErrchk(cudaGetLastError());
}
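// three-pass flattened softmax for batch size 1: pass 1 reduces per-block maxima
// into dev_partialX4d, pass 2 subtracts the global max, exponentiates and writes
// per-block partial sums, pass 3 divides by the total. Correctness rests on
// shift invariance: softmax(x) == softmax(x - C) for any constant C.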
float CEE_seg(float* x, int* t, const int size_category, const int size_spatial_feature_map)
{
int c = size_category;
int size = size_spatial_feature_map;
float temp = 0;
for (int j = 0; j < size; j++) {
for (int i = 0; i < c; i++) {
if (i == t[j]) {
temp += log(x[i*size + j] + 1e-7);
				break;	// exactly one target category per pixel
}
}
}
temp /= -size;
return temp;
}
__global__ void Kernel_CEE_seg(float* dev_x, int* dev_t, float* dev_loss, const int c, const int size)
{
int j = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
float temp = 0;
while (j < N)
{
for (int i = 0; i < c; i++) {
if (i == dev_t[j]) {
temp = logf(dev_x[i*size + j] + 1e-7);
atomicAdd(dev_loss, temp);
				break;	// exactly one target category per pixel
}
}
j += gridDim.x*blockDim.x;
}
}
float CEE_seg_gpu(float* dev_x, int* dev_t, float* dev_loss,
const int size_category, const int size_spatial_feature_map) {
int c = size_category;
int size = size_spatial_feature_map;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
cudaMemset(dev_loss, 0, sizeof(float));
Kernel_CEE_seg << < dimGrid, dimBlock >> > (dev_x, dev_t, dev_loss, c, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
float loss = 0;
cudaMemcpy(&loss, dev_loss, sizeof(float), cudaMemcpyDeviceToHost);
loss /= -size;
return loss;
}
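// segmentation variant: targets are integer class ids per pixel instead of
// one-hot rows, so the loss is -(1/size) * sum_j log(x[t_j, j] + 1e-7) over the
// spatial positions.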
/*padding and stride*/
void Padding_forward(char txt, float* x_pad, float* x, const int pad,
const int XN, const int XC, const int XH, const int XW) {
int idx, idx_pad;
int XH_pad = XH + 2 * pad;
int XW_pad = XW + 2 * pad;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < XH; k++) {
for (int l = 0; l < XW; l++) {
idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
idx_pad = i*(XC*XH_pad*XW_pad) + j*(XH_pad*XW_pad) + (k + pad)*(XW_pad)+(l + pad);
x_pad[idx_pad] = x[idx];
}
}
}
}
}
void Padding_backward(char txt, float* dx_pad, float* dx, const int pad,
const int XN, const int XC, const int XH, const int XW,
const int dXH, const int dXW)
{
int i, j, k, l;
int idx_dx, idx_dx_pad;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < XH; k++) {
for (l = 0; l < XW; l++) {
idx_dx_pad = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
idx_dx = i*(XC*dXH*dXW) + j*(dXH*dXW) + (k + pad)*(dXW)+(l + pad);
dx_pad[idx_dx_pad] = dx[idx_dx];
}
}
}
}
}
__global__ void Kernel_Padding_forward(float* dev_x_pad, float*dev_X, const int pad,
const int XN, const int XC, const int XH, const int XW) {
int i, j, k, l;
int XH_pad = XH + 2 * pad;
int XW_pad = XW + 2 * pad;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int idx_pad, idx;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
idx_pad = i*(XC*XH_pad*XW_pad) + j*(XH_pad*XW_pad) + (k + pad)*(XW_pad)+(l + pad);
idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
dev_x_pad[idx_pad] = dev_X[idx];
tid += gridDim.x*blockDim.x;
}
}
void Padding_forward_gpu(float* dev_x_pad, float* dev_X, const int pad,
const int XN, const int XC, const int XH, const int XW) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Padding_forward << < dimGrid, dimBlock >> > (dev_x_pad, dev_X, pad, XN, XC, XH, XW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Padding_backward(float* dev_dx_pad, float*dev_dx, const int pad,
const int XN, const int XC, const int XH, const int XW,
const int dXH, const int dXW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int i, j, k, l, idx_dx_pad, idx_dx;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
idx_dx_pad = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
idx_dx = i*(XC*dXH*dXW) + j*(dXH*dXW) + (k + pad)*(dXW)+(l + pad);
dev_dx_pad[idx_dx_pad] = dev_dx[idx_dx];
tid += gridDim.x*blockDim.x;
}
}
void Padding_backward_gpu(float* dev_dx_pad, float*dev_dx, const int pad,
const int XN, const int XC, const int XH, const int XW,
const int dxH, const int dxW) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Padding_backward << < dimGrid, dimBlock >> > (dev_dx_pad, dev_dx, pad, XN, XC, XH, XW, dxH, dxW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void Padding_transpose_forward(float* x_pad, float* x, int stride, int pad,
int XN, int XC, int XH, int XW, int XH_pad, int XW_pad)
{
int idx_pad, idx;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < XH; k++) {
for (int l = 0; l < XW; l++) {
idx_pad = i*(XC*XH_pad*XW_pad) + j*(XH_pad*XW_pad) + (stride*k + pad)*(XW_pad)+(stride*l + pad);
idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
x_pad[idx_pad] = x[idx];
}
}
}
}
}
void Padding_transpose_backward(float* dx_pad, float* dx, int stride, int pad,
int XN, int XC, int XH, int XW, int dXH, int dXW)
{
int idx_dx_pad, idx_dx;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < XH; k++) {
for (int l = 0; l < XW; l++) {
idx_dx_pad = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
idx_dx = i*(XC*dXH*dXW) + j*(dXH*dXW) + (stride*k + pad)*(dXW)+(stride*l + pad);
dx_pad[idx_dx_pad] = dx[idx_dx];
}
}
}
}
}
__global__ void Kernel_Padding_transpose_forward(float* dev_x_pad, float* dev_x, int stride, int pad,
int XN, int XC, int XH, int XW, int XH_pad, int XW_pad) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int i, j, k, l, idx_x_pad, idx_x;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
idx_x_pad = i*(XC*XH_pad*XW_pad) + j*(XH_pad*XW_pad) + (stride*k + pad)*(XW_pad)+(stride*l + pad);
idx_x = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
dev_x_pad[idx_x_pad] = dev_x[idx_x];
tid += gridDim.x*blockDim.x;
}
}
void Padding_transpose_forward_gpu(float* dev_x_pad, float* dev_x, int stride, int pad,
int XN, int XC, int XH, int XW, int XH_pad, int XW_pad) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Padding_transpose_forward << < dimGrid, dimBlock >> > (dev_x_pad, dev_x, stride, pad, XN, XC, XH, XW, XH_pad, XW_pad);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Padding_transpose_backward(float* dev_dx_pad, float* dev_dx, int stride, int pad,
int XN, int XC, int XH, int XW, int dXH, int dXW)
{
int i, j, k, l;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
if (tid >= N) return;
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
int idx_dx_pad = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
int idx_dx = i*(XC*dXH*dXW) + j*(dXH*dXW) + (stride*k + pad)*(dXW)+(stride*l + pad);
dev_dx_pad[idx_dx_pad] = dev_dx[idx_dx];
}
void Padding_transpose_backward_gpu(float* dev_dx_pad, float* dev_dx, int stride, int pad,
int XN, int XC, int XH, int XW, int dXH, int dXW)
{
int size = XN*XC*XH*XW;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'padding_transpose_backward_gpu'!" << endl;
}
Kernel_Padding_transpose_backward << < dimGrid, dimBlock >> > (dev_dx_pad, dev_dx, stride, pad, XN, XC, XH, XW, dXH, dXW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void Stride_forward(float* col, float* img, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
int i, j, k, l, m, n, a, b;
int y_max, x_max;
int idx_col, idx_img;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < FH; k++) {
for (l = 0; l < FW; l++) {
for (m = 0; m < OH; m++) {
for (n = 0; n < OW; n++) {
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
col[idx_col] = 0;
}
}
}
}
}
}
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < FH; k++) {
y_max = k + stride*OH;
for (l = 0; l < FW; l++) {
x_max = l + stride*OW;
for (a = k, m = 0; a < y_max; a = a + stride, m++) {
for (b = l, n = 0; b < x_max; b = b + stride, n++) {
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
col[idx_col] = img[idx_img];
}
}
}
}
}
}
}
__global__ void Kernel_Stride_forward(float* dev_col, float* dev_img, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*FH*FW*OH*OW;
int i, j, k, l, m, n, a, b;
int idx_col;
int idx_img;
while (tid < N)
{
idx6d(tid, XN, XC, FH, FW, OH, OW, i, j, k, l, m, n);
a = k + m*stride;
b = l + n*stride;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
		dev_col[idx_col] = dev_img[idx_img];	// every col entry is written exactly once, so no separate zeroing is needed
tid += gridDim.x * blockDim.x;
}
}
void Stride_forward_gpu(float* dev_col, float* dev_img, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Stride_forward << < dimGrid, dimBlock >> > (dev_col, dev_img, stride, XN, XC, FH, FW, OH, OW, XH, XW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
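// im2col mapping used by the stride kernels: for output position (m, n) and
// filter offset (k, l),
//   col[i, j, k, l, m, n] = img[i, j, k + m*stride, l + n*stride]
// which turns convolution into a matrix product of the flattened col with the
// flattened filters; Stride_backward is the matching col2im scatter-add.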
void Stride_backward(float* img, float* col, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
int i, j, k, l, m, n;
int y_max, x_max;
int idx_img, idx_col;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < XH; k++) {
for (l = 0; l < XW; l++) {
idx_img = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
img[idx_img] = 0;
}
}
}
}
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < FH; k++) {
y_max = k + stride*OH;
for (l = 0; l < FW; l++) {
x_max = l + stride*OW;
for (int a = k, m = 0; a < y_max; a = a + stride, m++) {
for (int b = l, n = 0; b < x_max; b = b + stride, n++) {
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
img[idx_img] += col[idx_col];
}
}
}
}
}
}
}
__global__ void Kernel_Stride_backward(float* dev_img, float* dev_col, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int i, j, a, b, idx_img, idx_col;
int k, l, m, n, temp;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, a, b);
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
dev_img[idx_img] = 0;
for (k = 0; k < FH && k <= a; k++)
{
m = (a - k) / stride;
temp = k + stride*m;
if (temp != a || m >= OH)
continue;
for (l = 0; l < FW && l <= b; l++)
{
n = (b - l) / stride;
temp = l + stride*n;
if (temp != b || n >= OW)
continue;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
dev_img[idx_img] += dev_col[idx_col];
}
}
tid += gridDim.x*blockDim.x;
}
}
void Stride_backward_gpu(float* dev_img, float* dev_col, int stride,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW,
const int XH, const int XW) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Stride_backward << < dimGrid, dimBlock >> > (dev_img, dev_col, stride, XN, XC, FH, FW, OH, OW, XH, XW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
/*reshape and transpose*/
void Flatten6d(float* flattenX, float****** X,
const int d1, const int d2, const int d3, const int d4, const int d5, const int d6) {
for (int i = 0; i < d1; i++) {
for (int j = 0; j < d2; j++) {
for (int k = 0; k < d3; k++) {
for (int l = 0; l < d4; l++) {
for (int m = 0; m < d5; m++) {
for (int n = 0; n < d6; n++) {
flattenX[i*(d2*d3*d4*d5*d6) + j*(d3*d4*d5*d6) + k*(d4*d5*d6) + l*(d5*d6) + m*(d6)+n]
= X[i][j][k][l][m][n];
}
}
}
}
}
}
}
void Flatten4d(float* flattenX, float**** X,
const int d1, const int d2, const int d3, const int d4) {
int i, j, k, l;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
for (k = 0; k < d3; k++) {
for (l = 0; l < d4; l++) {
flattenX[i*(d2*d3*d4) + j*(d3*d4) + k*(d4)+l] = X[i][j][k][l];
}
}
}
}
}
void Flatten2d(float* flattenX, float** X,
const int d1, const int d2) {
int i, j;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
flattenX[i*(d2)+j] = X[i][j];
}
}
}
void Flatten2d_int(int* flattenX, int** X,
const int d1, const int d2) {
int i, j;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
flattenX[i*(d2)+j] = X[i][j];
}
}
}
void Reshape6to2(float** reshapeArray, float****** array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
						reshapeArray[i*(OH*OW) + j*(OW)+k][l*(FH*FW) + m*(FW)+n] = array[i][j][k][l][m][n];	// row stride inside an FHxFW window is FW
}
}
}
}
}
}
}
void Reshape6to2_gpu(float* dev_reshapeArray, float* dev_array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW,
float* host_reshapeArray, int size_reshapeArray) {
	// disabled stub: Kernel_Reshape6to2 is not declared earlier in this file,
	// so the call below is left commented out
	//Kernel_Reshape6to2 << < 1, 1 >> > (dev_reshapeArray, dev_array, XN, OH, OW, XC, FH, FW);
//cudaDeviceSynchronize();
//cudaMemcpy(host_reshapeArray, dev_reshapeArray, size_reshapeArray * sizeof(float), cudaMemcpyDeviceToHost);
}
void Reshape6to2_poolingForward(float** reshapeArray, float****** array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
reshapeArray[i*(OH*OW*XC) + j*(OW*XC) + k*(XC)+l][m*(FW)+n] = array[i][j][k][l][m][n];
}
}
}
}
}
}
}
void Reshape4to2_forward(float** reshapeArray, float**** array,
const int FN, const int FC, const int FH, const int FW) {
int i, j, k, l;
for (i = 0; i < FN; i++) {
for (j = 0; j < FC; j++) {
for (k = 0; k < FH; k++) {
for (l = 0; l < FW; l++) {
reshapeArray[i][j*(FH*FW) + k*(FW)+l] = array[i][j][k][l];
}
}
}
}
}
void Reshape4to2_backward(float** reshapeArray, float**** array,
const int XN, const int OH, const int OW, const int FN) {
int i, j, k, l;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < FN; l++) {
reshapeArray[i*(OH*OW) + j*(OW)+k][l] = array[i][j][k][l];
}
}
}
}
}
void Reshape4to2(char txt, float** reshapeArray, float**** array,
const int d1, const int d2, const int d3, const int d4) {
int FN, FC, FH, FW, XN, OH, OW;
int i, j, k, l;
switch (txt)
{
case 'f':
FN = d1;
FC = d2;
FH = d3;
FW = d4;
Reshape4to2_forward(reshapeArray, array, FN, FC, FH, FW);
break;
case 'b':
XN = d1;
OH = d2;
OW = d3;
FN = d4;
Reshape4to2_backward(reshapeArray, array, XN, OH, OW, FN);
break;
default:
cout << "Error for 'txt' variable in Reshape4to2(cpu)!" << endl;
break;
}
}
void Reshape2to4_forward(float**** reshapeArray, float** array,
const int XN, const int OH, const int OW, const int FN) {
int i, j, k, l;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < FN; l++) {
reshapeArray[i][j][k][l] = array[i*(OH*OW) + j*(OW)+k][l];
}
}
}
}
}
void Reshape2to4_backward(float**** reshapeArray, float** array,
const int XN, const int XC, const int XH, const int XW) {
int i, j, k, l;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < XH; k++) {
for (l = 0; l < XW; l++) {
reshapeArray[i][j][k][l] = array[i][j*(XH*XW) + k*(XW)+l];
}
}
}
}
}
void Reshape2to4(char txt, float**** reshapeArray, float** array,
const int d1, const int d2, const int d3, const int d4) {
int XN, OH, OW, FN, XC, XH, XW;
int i, j, k, l;
switch (txt)
{
case 'f':
XN = d1;
OH = d2;
OW = d3;
FN = d4;
Reshape2to4_forward(reshapeArray, array, XN, OH, OW, FN);
break;
case 'b':
XN = d1;
XC = d2;
XH = d3;
XW = d4;
Reshape2to4_backward(reshapeArray, array, XN, XC, XH, XW);
break;
default:
cout << "Error for 'txt' variable in Reshape2to4(cpu)!" << endl;
break;
}
}
void Reshape2to6(float****** reshapeArray, float** array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
						reshapeArray[i][j][k][l][m][n] = array[i*(OH*OW) + j*(OW)+k][l*(FH*FW) + m*(FW)+n];	// row stride inside an FHxFW window is FW
}
}
}
}
}
}
}
void Reshape1to6(float****** reshapeArray, float* array,
const int d1, const int d2, const int d3, const int d4, const int d5, const int d6) {
int i, j, k, l, m, n;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
for (k = 0; k < d3; k++) {
for (l = 0; l < d4; l++) {
for (m = 0; m < d5; m++) {
for (n = 0; n < d6; n++) {
reshapeArray[i][j][k][l][m][n] = array[i*(d2*d3*d4*d5*d6) + j*(d3*d4*d5*d6) + k*(d4*d5*d6) + l*(d5*d6) + m*(d6)+n];
}
}
}
}
}
}
}
void Reshape1to4(float**** reshapeArray, float* array,
const int XN, const int OH, const int OW, const int XC) {
int i, j, k, l;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
reshapeArray[i][j][k][l] = array[i*(OH*OW*XC) + j*(OW*XC) + k*(XC)+l];
}
}
}
}
}
void Reshape1to2(float** reshapeArray, float* array,
const int d1, const int d2) {
int i, j;
for (i = 0; i < d1; i++) {
for (j = 0; j < d2; j++) {
reshapeArray[i][j] = array[i*(d2)+j];
}
}
}
void Transpose2d(float* array_transpose, float* array, const int r, const int c) {
int i, j;
for (i = 0; i < r; i++) {
for (j = 0; j < c; j++) {
array_transpose[j*r + i] = array[i*c + j];
}
}
}
__global__ void Kernel_Transpose2d(float* dev_transposeArray, float* dev_array,
const int r, const int c) {
//unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
//int N = r*c;
//int i, j, idx_transposeArray, idx_array;
//while (tid < N)
//{
// idx2d(tid, r, c, i, j);
// idx_array = i*c + j;
// idx_transposeArray = j*r + i;
// dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
// tid += gridDim.x * blockDim.x;
//}
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= r || j >= c) return;
int idx_transposeArray, idx_array;
idx_array = i*c + j;
idx_transposeArray = j*r + i;
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
}
void Transpose2d_gpu(float* dev_transposeArray, float* dev_array, const int r, const int c) {
//dim3 dimBlock(BLOCK_SIZE);
//dim3 dimGrid(GRID_SIZE);
//Kernel_Transpose2d << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, r, c);
//cudaDeviceSynchronize();
//gpuErrchk(cudaGetLastError());
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((r + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (c + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
Kernel_Transpose2d << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void Transpose4d_forward(float* array_transpose, float* array,
const int XN, const int OH, const int OW, const int FN) {
int i, j, k, l;
int idx_transpose, idx;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < FN; l++) {
idx_transpose = i*(FN*OH*OW) + l*(OH*OW) + j*(OW)+k;
idx = i*(OH*OW*FN) + j*(OW*FN) + k*(FN)+l;
array_transpose[idx_transpose] = array[idx];
}
}
}
}
}
__global__ void Kernel_Transpose4d_forward(float* dev_transposeArray, float* dev_array,
const int XN, const int OH, const int OW, const int FN) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*OH*OW*FN;
int i, j, k, l;
int idx_transposeArray, idx_array;
while (tid < N)
{
idx4d(tid, XN, OH, OW, FN, i, j, k, l);
idx_transposeArray = i*(FN*OH*OW) + l*(OH*OW) + j*(OW)+k;
idx_array = i*(OH*OW*FN) + j*(OW*FN) + k*(FN)+l;
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
tid += gridDim.x*blockDim.x;
}
}
void Transpose4d_backward(float* array_transpose, float* array,
const int XN, const int XC, const int OH, const int OW) {
int i, j, k, l;
int idx_transpose, idx;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < OH; k++) {
for (l = 0; l < OW; l++) {
idx_transpose = i*(OH*OW*XC) + k*(OW*XC) + l*(XC)+j;
idx = i*(XC*OH*OW) + j*(OH*OW) + k*(OW)+l;
array_transpose[idx_transpose] = array[idx];
}
}
}
}
}
__global__ void Kernel_Transpose4d_backward(float* dev_transposeArray, float* dev_array,
const int XN, const int FN, const int OH, const int OW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*FN*OH*OW;
int i, j, k, l;
int idx_transposeArray, idx_array;
while (tid < N)
{
idx4d(tid, XN, FN, OH, OW, i, j, k, l);
idx_transposeArray = i*(OH*OW*FN) + k*(OW*FN) + l*(FN)+j;
idx_array = i*(FN*OH*OW) + j*(OH*OW) + k*(OW)+l;
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
tid += gridDim.x*blockDim.x;
}
}
void Transpose4d(char txt, float* array_transpose, float* array,
const int d1, const int d2, const int d3, const int d4) {
int XN, OH, OW, FN, XC;
switch (txt)
{
case 'f':
XN = d1;
OH = d2;
OW = d3;
FN = d4;
Transpose4d_forward(array_transpose, array, XN, OH, OW, FN);
break;
case 'b':
XN = d1;
XC = d2;
OH = d3;
OW = d4;
Transpose4d_backward(array_transpose, array, XN, XC, OH, OW);
break;
default:
cout << "Error for 'txt' variable in Transpose4d(cpu)!" << endl;
break;
}
}
void Transpose4d_gpu(char txt, float* dev_transposeArray, float* dev_array,
const int d1, const int d2, const int d3, const int d4) {
int XN, OH, OW, FN;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
switch (txt)
{
case 'f':
XN = d1;
OH = d2;
OW = d3;
FN = d4;
if (XN == 1) {
Transpose2d_gpu(dev_transposeArray, dev_array, OH*OW, FN);
}
else {
Kernel_Transpose4d_forward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, OH, OW, FN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
break;
case 'b':
XN = d1;
FN = d2;
OH = d3;
OW = d4;
if (XN == 1) {
Transpose2d_gpu(dev_transposeArray, dev_array, FN, OH*OW);
}
else {
Kernel_Transpose4d_backward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, FN, OH, OW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
break;
default:
cout << "Error for 'txt' variable in Transpose4d(gpu)!" << endl;
break;
}
}
void Transpose6d_forward(float* array_transpose, float* array,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW) {
int i, j, k, l, m, n;
int idx_transpose;
int idx;
for (i = 0; i < XN; i++) {
for (j = 0; j < XC; j++) {
for (k = 0; k < FH; k++) {
for (l = 0; l < FW; l++) {
for (m = 0; m < OH; m++) {
for (n = 0; n < OW; n++) {
idx_transpose = i*(OH*OW*XC*FH*FW) + m*(OW*XC*FH*FW) + n*(XC*FH*FW) + j*(FH*FW) + k*(FW)+l;
idx = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
array_transpose[idx_transpose] = array[idx];
}
}
}
}
}
}
}
__global__ void Kernel_Transpose6d_forward(float* dev_transposeArray, float* dev_array,
const int XN, const int XC, const int FH, const int FW, const int OH, const int OW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*FH*FW*OH*OW;
int i, j, k, l, m, n;
int idx_transposeArray;
int idx_array;
while (tid < N)
{
idx6d(tid, XN, XC, FH, FW, OH, OW, i, j, k, l, m, n);
idx_transposeArray = i*(OH*OW*XC*FH*FW) + m*(OW*XC*FH*FW) + n*(XC*FH*FW) + j*(FH*FW) + k*(FW)+l;
idx_array = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
tid += gridDim.x *blockDim.x;
}
}
void Transpose6d_backward(float* array_transpose, float* array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
int idx_transpose;
int idx;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
idx_transpose = i*(XC*FH*FW*OH*OW) + l*(FH*FW*OH*OW) + m*(FW*OH*OW) + n*(OH*OW) + j*(OW)+k;
						idx = i*(OH*OW*XC*FH*FW) + j*(OW*XC*FH*FW) + k*(XC*FH*FW) + l*(FH*FW) + m*(FW)+n;	// row stride inside an FHxFW window is FW
array_transpose[idx_transpose] = array[idx];
}
}
}
}
}
}
}
__global__ void Kernel_Transpose6d_backward(float* dev_transposeArray, float* dev_array,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*OH*OW*XC*FH*FW;
int i, j, k, l, m, n;
int idx_transposeArray;
int idx_array;
while (tid < N)
{
idx6d(tid, XN, OH, OW, XC, FH, FW, i, j, k, l, m, n);
idx_transposeArray = i*(XC*FH*FW*OH*OW) + l*(FH*FW*OH*OW) + m*(FW*OH*OW) + n*(OH*OW) + j*(OW)+k;
		idx_array = i*(OH*OW*XC*FH*FW) + j*(OW*XC*FH*FW) + k*(XC*FH*FW) + l*(FH*FW) + m*(FW)+n;	// row stride inside an FHxFW window is FW
dev_transposeArray[idx_transposeArray] = dev_array[idx_array];
tid += gridDim.x*blockDim.x;
}
}
void Transpose6d(char txt, float* array_transpose, float* array,
const int d1, const int d2, const int d3, const int d4, const int d5, const int d6) {
int XN, OH, OW, FN, XC, FH, FW;
int i, j, k, l;
switch (txt)
{
case 'f':
XN = d1;
XC = d2;
FH = d3;
FW = d4;
OH = d5;
OW = d6;
Transpose6d_forward(array_transpose, array, XN, XC, FH, FW, OH, OW);
break;
case 'b':
XN = d1;
OH = d2;
OW = d3;
XC = d4;
FH = d5;
FW = d6;
Transpose6d_backward(array_transpose, array, XN, OH, OW, XC, FH, FW);
break;
default:
cout << "Error for 'txt' variable in Transpose6d(cpu)!" << endl;
break;
}
}
void Transpose6d_gpu(char txt, float* dev_transposeArray, float* dev_array,
const int d1, const int d2, const int d3, const int d4, const int d5, const int d6) {
int XN, OH, OW, FN, XC, FH, FW;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
switch (txt)
{
case 'f':
XN = d1;
XC = d2;
FH = d3;
FW = d4;
OH = d5;
OW = d6;
if (XN == 1) {
Transpose2d_gpu(dev_transposeArray, dev_array, XC*FH*FW, OH*OW);
}
else {
Kernel_Transpose6d_forward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, XC, FH, FW, OH, OW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
break;
case 'b':
XN = d1;
OH = d2;
OW = d3;
XC = d4;
FH = d5;
FW = d6;
if (XN == 1) {
Transpose2d_gpu(dev_transposeArray, dev_array, OH*OW, XC*FH*FW);
}
else {
Kernel_Transpose6d_backward << < dimGrid, dimBlock >> > (dev_transposeArray, dev_array, XN, OH, OW, XC, FH, FW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
break;
default:
cout << "Error for 'txt' variable in Transpose6d(gpu)!" << endl;
break;
}
}
void Argmax(int* argMax, float** array, const int r, const int c) {
	int idx;
	float temp;
	for (int i = 0; i < r; i++) {
		idx = 0;
		temp = array[i][0];	// seed with the first element so all-negative rows work
		for (int j = 1; j < c; j++) {
			if (array[i][j] > temp) {
				temp = array[i][j];
				idx = j;
			}
		}
		argMax[i] = idx;
	}
}
__global__ void Kernel_Argmax(int* dev_argMax, float* dev_array, const int r, const int c) {
	unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= r) return;
	int idx = 0;	// idx must be initialized: with no element above the seed it is written as-is
	float temp = dev_array[i*c + 0];
	for (int j = 1; j < c; j++) {
		if (dev_array[i*c + j] > temp) {
			temp = dev_array[i*c + j];
			idx = j;
		}
	}
	dev_argMax[i] = idx;
}
void Argmax_gpu(int* dev_argMax, float* dev_array, const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE_X);
dim3 dimGrid((r + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Argmax_gpu'!" << endl;
}
Kernel_Argmax << < dimGrid, dimBlock >> > (dev_argMax, dev_array, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void Max(float* array_max, int* arg_max, float* array,
	const int r, const int c) {
	float temp;
	int idx;
	for (int i = 0; i < r; i++) {
		idx = 0;
		temp = array[i*c + 0];	// seed with the first element (matches Kernel_Max below)
		for (int j = 1; j < c; j++) {
			if (array[i*c + j] > temp) {
				temp = array[i*c + j];
				idx = j;
			}
		}
		arg_max[i] = idx;
		array_max[i] = temp;
	}
}
__global__ void Kernel_Max(float* dev_arrayMax, int* dev_argMax, float* dev_array,
const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
int idx, i;
float temp;
while (tid < N)
{
i = tid;
temp = 0.;
idx = 0;
for (int j = 0; j < c; j++) {
if (j == 0) temp = dev_array[i*c + j], idx = 0;
else if (dev_array[i*c + j] > temp) temp = dev_array[i*c + j], idx = j;
}
dev_argMax[i] = idx;
dev_arrayMax[i] = temp;
tid += gridDim.x*blockDim.x;
}
}
void Max_gpu(float* dev_arrayMax, int* dev_argMax, float* dev_array,
const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Max << < dimGrid, dimBlock >> > (dev_arrayMax, dev_argMax, dev_array, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void Avg(float* array_avg, float* array,
const int r, const int c)
{
float sum;
for (int i = 0; i < r; i++) {
sum = 0.0;
for (int j = 0; j < c; j++) {
sum += array[i*c + j];
}
array_avg[i] = sum / c;
}
}
__global__ void Kernel_Avg(float* dev_arrayMax, float* dev_array,
const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
float sum;
int i;
while (tid < N)
{
i = tid;
sum = 0.0;
for (int j = 0; j < c; j++) {
sum += dev_array[i*c + j];
}
dev_arrayMax[i] = sum / c;
tid += gridDim.x*blockDim.x;
}
}
void Avg_gpu(float* dev_arrayMax, float* dev_array,
const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Avg << < dimGrid, dimBlock >> > (dev_arrayMax, dev_array, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void Function1_poolingBackward(float* dmax, int* arg_max, float* array,
const int i_dmax, const int j_dmax) {
int i, j;
int r = i_dmax, c = j_dmax;
for (i = 0; i < r; i++) {
for (j = 0; j < c; j++) {
dmax[i*c + j] = 0;
}
dmax[i*c + arg_max[i]] = array[i];
}
}
__global__ void Kernel_Function1_poolingBackward(float* dev_dmax, int* dev_argMax, float* dev_flattenDout,
const int i_dmax, const int j_dmax) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = i_dmax*j_dmax;
int i, j;
while (tid < N)
{
idx2d(tid, i_dmax, j_dmax, i, j);
		// each thread owns one (i, j) cell, so a single conditional write
		// suffices and avoids redundant stores to the argmax column
		dev_dmax[i*j_dmax + j] = (j == dev_argMax[i]) ? dev_flattenDout[i] : 0.0f;
tid += gridDim.x*blockDim.x;
}
}
void Function1_poolingBackward_gpu(float* dev_dmax, int* dev_argMax, float* dev_flattenDout,
const int i_dmax, const int j_dmax) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function1_poolingBackward << < dimGrid, dimBlock >> > (dev_dmax, dev_argMax, dev_flattenDout, i_dmax, j_dmax);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void Function1_poolingBackward_avg(float* dmax, float* array,
const int i_dmax, const int j_dmax)
{
int i, j;
int r = i_dmax, c = j_dmax;
for (i = 0; i < r; i++) {
for (j = 0; j < c; j++) {
dmax[i*c + j] = array[i] / c;
}
}
}
__global__ void Kernel_Function1_poolingBackward_avg(float* dev_dmax, float* dev_flattenDout,
const int i_dmax, const int j_dmax) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = i_dmax*j_dmax;
int i, j;
while (tid < N)
{
idx2d(tid, i_dmax, j_dmax, i, j);
dev_dmax[i*j_dmax + j] = dev_flattenDout[i] / j_dmax;
tid += gridDim.x*blockDim.x;
}
}
void Function1_poolingBackward_avg_gpu(float* dev_dmax, float* dev_flattenDout,
const int i_dmax, const int j_dmax) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function1_poolingBackward_avg << < dimGrid, dimBlock >> > (dev_dmax, dev_flattenDout, i_dmax, j_dmax);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void Function2_poolingBackward(float** dcol, float** dmax,
const int XN, const int OH, const int OW, const int XC, const int FH, const int FW) {
int i, j, k, l, m, n;
for (i = 0; i < XN; i++) {
for (j = 0; j < OH; j++) {
for (k = 0; k < OW; k++) {
for (l = 0; l < XC; l++) {
for (m = 0; m < FH; m++) {
for (n = 0; n < FW; n++) {
dcol[i*(OH*OW) + j*(OW)+k][l*(FH*FW) + m*(FH)+n] = dmax[i*(OH*OW*XC) + j*(OW*XC) + k*(XC)+l][m*(FW)+n];
}
}
}
}
}
}
}
__global__ void Kernel_Function_reluForward(float* dev_x, int* dev_index, const int size) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
if (dev_x[tid] > 0) dev_index[tid] = 1;
else dev_index[tid] = 0;
dev_x[tid] *= dev_index[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_reluForward_gpu(float* dev_x, int* dev_index, const int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_reluForward << < dimGrid, dimBlock >> > (dev_x, dev_index, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function_reluBackward(float* dev_dout, int* dev_index, const int size) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_dout[tid] *= dev_index[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_reluBackward_gpu(float* dev_dout, int* dev_index, const int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_reluBackward << < dimGrid, dimBlock >> > (dev_dout, dev_index, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_softmaxBackward(float* dev_dx, float* dev_y, int* dev_t,
const int r, const int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r*c;
while (tid < N)
{
dev_dx[tid] = (dev_y[tid] - dev_t[tid]) / r;
tid += gridDim.x*blockDim.x;
}
}
void Function_softmaxBackward_gpu(float* dev_dx, float* dev_y, int* dev_t,
const int r, const int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_softmaxBackward << < dimGrid, dimBlock >> > (dev_dx, dev_y, dev_t, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
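/*
 note: the kernel above implements the combined softmax + cross-entropy gradient
 dL/dx = (y - t) / batch_size, where y is the softmax output, t the one-hot target,
 and r the batch dimension.
*/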
/*batch*/
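/* mini-batch slicing: copies a contiguous window of BN samples (and their labels) starting at
   sample offset 'randomNumber'; the caller is assumed to keep randomNumber + BN within the dataset. */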
__global__ void Kernel_Function_batch1(float* dev_x, float* dev_x_batch,
const int BN, const int XC, const int XH, const int XW,
int randomNumber) {
int i, j, k, l;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = BN*XC*XH*XW;
if (tid >= N) return;
idx4d(tid, BN, XC, XH, XW, i, j, k, l);
int idx_batch = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
int idx = (i + randomNumber)*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
dev_x_batch[idx_batch] = dev_x[idx];
}
__global__ void Kernel_Function_batch2(int* dev_t, int* dev_t_batch,
const int BN, const int ON, int randomNumber) {
int i, j;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = BN*ON;
if (tid >= N) return;
idx2d(tid, BN, ON, i, j);
int idx_batch = i*ON + j;
int idx = (i + randomNumber)*ON + j;
dev_t_batch[idx_batch] = dev_t[idx];
}
void Function_batch_gpu(float* dev_x, int* dev_t, float* dev_x_batch, int* dev_t_batch,
const int BN, const int XC, const int XH, const int XW,
const int ON, int randomNumber) {
int size = BN*XC*XH*XW;
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid1.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_batch_gpu'!" << endl;
}
Kernel_Function_batch1 << < dimGrid1, dimBlock1 >> > (dev_x, dev_x_batch, BN, XC, XH, XW, randomNumber);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
size = BN*ON;
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_batch_gpu'!" << endl;
}
Kernel_Function_batch2 << < dimGrid2, dimBlock2 >> > (dev_t, dev_t_batch, BN, ON, randomNumber);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
/*dropout*/
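/* dropout with one curand state per element: at train time each activation is kept with
   probability (1 - dropoutRatio). Note that neither path rescales by 1/(1 - dropoutRatio);
   inverted-dropout scaling is deliberately left out here. */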
__global__ void Kernel_Function_dropoutinit(unsigned int seed, curandState_t* states, const int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the cpu */
tid, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[tid]);
tid += gridDim.x*blockDim.x;
}
}
void Function_dropoutinit_gpu(unsigned int seed, curandState_t* states, const int size) {
	dim3 dimBlock(512); //block size fixed at 512 here; the grid-stride loop still initializes all 'size' states
dim3 dimGrid(GRID_SIZE);
Kernel_Function_dropoutinit << < dimGrid, dimBlock >> > (seed, states, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function_dropoutForward(float* dev_x, int* dev_index, const int size,
float dropoutRatio, int train_flg,
curandState_t* states) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
float randomNumber;
while (tid < N)
{
if (train_flg == 1) {
randomNumber = curand_uniform(&states[tid]);
if (randomNumber > dropoutRatio) dev_index[tid] = 1;
else dev_index[tid] = 0;
dev_x[tid] *= dev_index[tid];
}
		else {
			//inference path: the usual (1 - dropoutRatio) scaling is deliberately commented out,
			//matching the training path, which applies no 1/(1 - dropoutRatio) rescaling either
			dev_x[tid] *= (1.0/* - dropoutRatio*/);
		}
tid += gridDim.x*blockDim.x;
}
}
void Function_dropoutForward_gpu(float* dev_x, int* dev_index, const int size,
float dropoutRatio, int train_flg,
curandState_t* states) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_dropoutForward << < dimGrid, dimBlock >> > (dev_x, dev_index, size, dropoutRatio, train_flg, states);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function_dropoutBackward(float* dev_dout, int* dev_index, const int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_dout[tid] *= dev_index[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_dropoutBackward_gpu(float* dev_dout, int* dev_index, const int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_dropoutBackward << < dimGrid, dimBlock >> > (dev_dout, dev_index, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
/*skip connection*/
__global__ void Kernel_Function_sc(float* dev_x, float* dev_x_skip, int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_x[tid] += dev_x_skip[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_sc_gpu(float* dev_x, float* dev_x_skip, int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_sc << < dimGrid, dimBlock >> > (dev_x, dev_x_skip, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
/*BN*/
__global__ void Kernel_Function_bninit(float* dev_gamma, const int DN) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= DN) return;
dev_gamma[tid] = 1;
}
void Function_bninit_gpu(float* dev_gamma, const int DN) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_bninit'!" << endl;
}
Kernel_Function_bninit << < dimGrid, dimBlock >> > (dev_gamma, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
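/*
 batch-normalization forward over an (XN x DN) activation matrix, split into stages:
   1) mu[d]  = mean over the batch           (Kernel_Function1)
   2) xc     = x - mu                        (Kernel_Function2)
   3) std[d] = sqrt(var + 1e-7)              (Kernel_Function3)
   4) xn     = xc / std                      (Kernel_Function4)
   5) running mean/var update with momentum  (Function_bnForward below)
   6) inference path uses the running stats  (Kernel_Function6)
   7) out    = gamma * xn + beta             (Kernel_Function7)
 the column-wise reductions assume the batch size XN fits in one block (XN <= BLOCK_SIZE).
*/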
__global__ void Kernel_Function1_bnForward(float* dev_mu, float* dev_x,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_x[tid];
	}
	__syncthreads(); //every thread must reach this barrier, so it sits outside the branch
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_mu[blockIdx.x] = cache[0] / XN;
__syncthreads();
}
void Function1_bnForward_gpu(float* dev_mu, float* dev_x,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function1_bnForward_gpu'" << endl;
}
Kernel_Function1_bnForward << < DN, BLOCK_SIZE >> > (dev_mu, dev_x, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function2_bnForward(float* dev_xc, float* dev_x, float* dev_mu,
const int XN, const int DN) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
int idx = i*DN + j;
dev_xc[idx] = dev_x[idx] - dev_mu[j];
}
void Function2_bnForward_gpu(float* dev_xc, float* dev_x, float* dev_mu,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function2_bnForward_gpu'!" << endl;
}
Kernel_Function2_bnForward << < dimGrid, dimBlock >> > (dev_xc, dev_x, dev_mu, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function3_bnForward(float* dev_std, float* dev_xc,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_xc[tid] * dev_xc[tid];
	}
	__syncthreads();
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_std[blockIdx.x] = sqrtf(cache[0] / XN + 1e-7);
__syncthreads();
}
void Function3_bnForward_gpu(float* dev_std, float* dev_xc,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function3_bnForward_gpu'" << endl;
}
Kernel_Function3_bnForward << < DN, BLOCK_SIZE >> > (dev_std, dev_xc, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function4_bnForward(float* dev_xn, float* dev_xc, float* dev_std,
const int XN, const int DN) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
int idx = i*DN + j;
dev_xn[idx] = dev_xc[idx] / dev_std[j];
}
void Function4_bnForward_gpu(float* dev_xn, float* dev_xc, float* dev_std,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function4_bnForward_gpu'!" << endl;
}
Kernel_Function4_bnForward << < dimGrid, dimBlock >> > (dev_xn, dev_xc, dev_std, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function5_bnForward(float* dev_running_mean, float* dev_running_var, float* dev_mu, float* dev_std,
float momentum, const int DN) {
	unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
	if (tid >= DN) return; //guard against the padded threads of the last block
	dev_running_mean[tid] = momentum * dev_running_mean[tid] + (1 - momentum) * dev_mu[tid];
	dev_running_var[tid] = momentum * dev_running_var[tid] + (1 - momentum) * dev_std[tid] * dev_std[tid];
}
void Function5_bnForward_gpu(float* dev_running_mean, float* dev_running_var, float* dev_mu, float* dev_std,
float momentum, const int DN) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function5_bnForward_gpu'!" << endl;
}
	//launch intentionally disabled; the running-stats update is handled by Function_bnForward_gpu below
	//Kernel_Function5_bnForward << < dimGrid, dimBlock >> > (dev_running_mean, dev_running_var, dev_mu, dev_std, momentum, DN);
	//cudaDeviceSynchronize();
	//gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function6_bnForward(float* dev_x, float* dev_running_mean, float* dev_running_var,
const int XN, const int DN) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
dev_x[idx] = (dev_x[idx] - dev_running_mean[j]) / sqrtf(dev_running_var[j] + 1e-7);
}
void Function6_bnForward_gpu(float* dev_x, float* dev_running_mean, float* dev_running_var,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function6_bnForward_gpu'!" << endl;
}
Kernel_Function6_bnForward << < dimGrid, dimBlock >> > (dev_x, dev_running_mean, dev_running_var, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function7_bnForward(float* dev_x, float* dev_out, float* dev_gamma, float* dev_beta,
const int XN, const int DN) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
dev_x[idx] = dev_gamma[j] * dev_out[idx] + dev_beta[j];
}
void Function7_bnForward_gpu(float* dev_x, float* dev_out, float* dev_gamma, float* dev_beta,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function7_bnForward_gpu'!" << endl;
}
Kernel_Function7_bnForward << < dimGrid, dimBlock >> > (dev_x, dev_out, dev_gamma, dev_beta, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Function_bnForward(float* dev_running_mean, float* dev_running_var, float* dev_mu, float* dev_std,
float momentum, const int DN) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i >= DN) return;
dev_running_mean[i] = momentum * dev_running_mean[i] + (1 - momentum) * dev_mu[i];
dev_running_var[i] = momentum * dev_running_var[i] + (1 - momentum) * dev_std[i] * dev_std[i];
}
void Function_bnForward_gpu(float* dev_running_mean, float* dev_running_var, float* dev_mu, float* dev_std,
float momentum, const int DN) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((DN + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_bnForward_test_gpu'!" << endl;
}
	Function_bnForward << < dimGrid, dimBlock >> > (dev_running_mean, dev_running_var, dev_mu, dev_std, momentum, DN);
	cudaDeviceSynchronize();
	gpuErrchk(cudaGetLastError());
}
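/*
 batch-normalization backward, mirroring the forward stages:
   dbeta[d]  = sum_i dout              dgamma[d] = sum_i xn * dout
   dxn = gamma * dout                  dxc = dxn / std
   dstd[d] = -sum_i dxn * xc / std^2   dvar = 0.5 * dstd / std
   dxc += (2/batch) * xc * dvar
   dmu[d] = sum_i dxc                  dx = dxc - dmu / batch
*/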
__global__ void Kernel_Function1_bnBackward(float* dev_dbeta, float* dev_dout,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_dout[tid];
	}
	__syncthreads();
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_dbeta[blockIdx.x] = cache[0];
__syncthreads();
}
void Function1_bnBackward_gpu(float* dev_dbeta, float* dev_dout,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function1_bnBackward_gpu'" << endl;
}
Kernel_Function1_bnBackward << < DN, BLOCK_SIZE >> > (dev_dbeta, dev_dout, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function2_bnBackward(float* dev_dgamma, float* dev_xn, float* dev_dout,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_xn[tid] * dev_dout[tid];
	}
	__syncthreads();
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_dgamma[blockIdx.x] = cache[0];
__syncthreads();
}
void Function2_bnBackward_gpu(float* dev_dgamma, float* dev_xn, float* dev_dout,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function2_bnBackward_gpu'" << endl;
}
Kernel_Function2_bnBackward << < DN, BLOCK_SIZE >> > (dev_dgamma, dev_xn, dev_dout, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function3_bnBackward(float* dev_dxn, float* dev_gamma, float* dev_dout, float* dev_dxc, float* dev_std,
const int XN, const int DN) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
dev_dxn[idx] = dev_gamma[j] * dev_dout[idx];
dev_dxc[idx] = dev_dxn[idx] / dev_std[j];
}
void Function3_bnBackward_gpu(float* dev_dxn, float* dev_gamma, float* dev_dout, float* dev_dxc, float* dev_std,
const int XN, const int DN) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function3_bnBackward_gpu'!" << endl;
}
Kernel_Function3_bnBackward << < dimGrid, dimBlock >> > (dev_dxn, dev_gamma, dev_dout, dev_dxc, dev_std, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function4_bnBackward(float* dev_dstd, float* dev_dxn, float* dev_xc, float* dev_std,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_dxn[tid] * dev_xc[tid] / (dev_std[blockIdx.x] * dev_std[blockIdx.x]);
	}
	__syncthreads();
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_dstd[blockIdx.x] = -cache[0];
__syncthreads();
}
void Function4_bnBackward_gpu(float* dev_dstd, float* dev_dxn, float* dev_xc, float* dev_std,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function4_bnBackward_gpu'" << endl;
}
Kernel_Function4_bnBackward << < DN, BLOCK_SIZE >> > (dev_dstd, dev_dxn, dev_xc, dev_std, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function5_bnBackward(float* dev_dxc, float* dev_xc, float* dev_dstd, float* dev_std,
const int XN, const int DN, int batch_size) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
float dvar = 0.5 * dev_dstd[j] / dev_std[j];
dev_dxc[idx] = dev_dxc[idx] + (2.0 / batch_size) * dev_xc[idx] * dvar;
}
void Function5_bnBackward_gpu(float* dev_dxc, float* dev_xc, float* dev_dstd, float* dev_std,
const int XN, const int DN, int batch_size) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function5_bnBackward_gpu'!" << endl;
}
Kernel_Function5_bnBackward << < dimGrid, dimBlock >> > (dev_dxc, dev_xc, dev_dstd, dev_std, XN, DN, batch_size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function6_bnBackward(float* dev_dmu, float* dev_dxc,
const int XN, const int DN) {
unsigned int cacheIdx = threadIdx.x;
__shared__ float cache[BLOCK_SIZE];
cache[cacheIdx] = 0;
__syncthreads();
unsigned int tid = blockIdx.x + threadIdx.x * DN;
	if (threadIdx.x < XN) {
		cache[cacheIdx] = dev_dxc[tid];
	}
	__syncthreads();
int k = blockDim.x / 2;
while (k != 0) {
if (cacheIdx < k) cache[cacheIdx] += cache[cacheIdx + k];
__syncthreads();
k /= 2;
}
if (cacheIdx == 0) dev_dmu[blockIdx.x] = cache[0];
__syncthreads();
}
void Function6_bnBackward_gpu(float* dev_dmu, float* dev_dxc,
const int XN, const int DN) {
if (XN > BLOCK_SIZE) {
cout << "Batch size(XN) > " << BLOCK_SIZE << " in 'Function6_bnBackward_gpu'" << endl;
}
Kernel_Function6_bnBackward << < DN, BLOCK_SIZE >> > (dev_dmu, dev_dxc, XN, DN);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function7_bnBackward(float* dev_dout, float* dev_dxc, float* dev_dmu,
const int XN, const int DN, int batch_size) {
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i >= XN || j >= DN) return;
unsigned int idx = i*DN + j;
dev_dout[idx] = dev_dxc[idx] - (dev_dmu[j] / batch_size);
}
void Function7_bnBackward_gpu(float* dev_dout, float* dev_dxc, float* dev_dmu,
const int XN, const int DN, int batch_size) {
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((XN + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (DN + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
if (dimGrid.x > MAX_GRID_SIZE || dimGrid.y > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function7_bnBackward_gpu'!" << endl;
}
Kernel_Function7_bnBackward << < dimGrid, dimBlock >> > (dev_dout, dev_dxc, dev_dmu, XN, DN, batch_size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
/*LRN*/
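/* local response normalization across channels:
     y = x / (bias + alpha * sum_{n in [c-r, c+r]} x_n^2)^beta
   the denominator (before the beta power) is cached in dev_y4 for the backward pass. */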
__global__ void Kernel_Function_lrnForward1(float* dev_x, float* dev_X, float* dev_y4,
float myBias, float myAlpha, int myDepth_radius,
const int XN, const int XC, const int XH, const int XW) {
int i, j, k, l, n;
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
if (tid >= N) return;
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
int idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
dev_X[idx] = dev_x[idx];
float sum = 0;
int idx_n;
for (n = j - myDepth_radius; n <= j + myDepth_radius; n++) {
if (n < 0 || n >= XC) continue;
idx_n = i*(XC*XH*XW) + n*(XH*XW) + k*(XW)+l;
sum += powf(dev_x[idx_n], 2);
}
dev_y4[idx] = (myBias + myAlpha * sum);
}
__global__ void Kernel_Function_lrnForward2(float* dev_x, float* dev_y4,
float myBeta,
const int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
if (tid >= N) return;
dev_x[tid] /= powf(dev_y4[tid], myBeta);
}
void Function_lrnForward_gpu(float* dev_x, float* dev_X, float* dev_y4,
float myBias, float myAlpha, float myBeta, int myDepth_radius,
const int XN, const int XC, const int XH, const int XW) {
int size = XN*XC*XH*XW;
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid1.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_lrnForward_gpu'!" << endl;
}
Kernel_Function_lrnForward1 << < dimGrid1, dimBlock1 >> > (dev_x, dev_X, dev_y4, myBias, myAlpha, myDepth_radius, XN, XC, XH, XW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_lrnForward_gpu'!" << endl;
}
Kernel_Function_lrnForward2 << < dimGrid2, dimBlock2 >> > (dev_x, dev_y4, myBeta, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function_lrnBackward1(float* dev_dout, float* dev_dout_new, float* dev_X, float* dev_y4,
float myAlpha, float myBeta, int myDepth_radius,
const int XN, const int XC, const int XH, const int XW) {
int i, j, k, l, n;
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
if (tid >= N) return;
idx4d(tid, XN, XC, XH, XW, i, j, k, l);
int idx = i*(XC*XH*XW) + j*(XH*XW) + k*(XW)+l;
float sum = 0;
int idx_n;
for (n = j - myDepth_radius; n <= j + myDepth_radius; n++) {
if (n < 0 || n >= XC) continue;
idx_n = i*(XC*XH*XW) + n*(XH*XW) + k*(XW)+l;
sum += (dev_X[idx_n] * dev_dout[idx_n]) / powf(dev_y4[idx_n], myBeta + 1);
}
dev_dout_new[idx] = dev_dout[idx] / powf(dev_y4[idx], myBeta) - 2.0*myAlpha*myBeta * dev_X[idx] * sum;
}
__global__ void Kernel_Function_lrnBackward2(float* dev_dout, float* dev_dout_new,
const int size) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
if (tid >= N) return;
dev_dout[tid] = dev_dout_new[tid];
}
void Function_lrnBackward_gpu(float* dev_dout, float* dev_dout_new, float* dev_X, float* dev_y4,
float myAlpha, float myBeta, int myDepth_radius,
const int XN, const int XC, const int XH, const int XW) {
int size = XN*XC*XH*XW;
dim3 dimBlock1(BLOCK_SIZE);
dim3 dimGrid1((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid1.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_lrnBackward_gpu'!" << endl;
}
Kernel_Function_lrnBackward1 << < dimGrid1, dimBlock1 >> > (dev_dout, dev_dout_new, dev_X, dev_y4, myAlpha, myBeta, myDepth_radius, XN, XC, XH, XW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
dim3 dimBlock2(BLOCK_SIZE);
dim3 dimGrid2((size + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid2.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Function_lrnBackward_gpu'!" << endl;
}
Kernel_Function_lrnBackward2 << < dimGrid2, dimBlock2 >> > (dev_dout, dev_dout_new, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
/*accuracy*/
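/* per-pixel accuracy: for every label pixel, take the argmax over the C_output class scores
   of the corresponding prediction pixel and compare it with the label. */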
__global__ void Kernel_Function_acc(float* dev_predict, int* dev_label, int* dev_acc_binary,
int N, int C_label, int C_output, int H, int W) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int N_ = N*C_label*H*W;
int i, j, k, l, j_, idx_label, idx_predict, idx_max;
float tmp;
while (tid < N_)
{
		idx4d(tid, N, C_label, H, W, i, j, k, l); //decode with the label layout so the argmax below scans the classes of this pixel
for (j_ = 0; j_ < C_output; j_++)
{
idx_predict = i*(C_output*H*W) + j_*(H*W) + k*(W)+l;
if (j_ == 0) tmp = dev_predict[idx_predict], idx_max = 0;
else if (dev_predict[idx_predict] > tmp)
{
tmp = dev_predict[idx_predict];
idx_max = j_;
}
}
idx4d(tid, N, C_label, H, W, i, j, k, l);
idx_label = i*(C_label*H*W) + j*(H*W) + k*(W)+l;
if (dev_label[idx_label] == idx_max) dev_acc_binary[idx_label] = 1;
else dev_acc_binary[idx_label] = 0;
tid += gridDim.x*blockDim.x;
}
}
void Function_acc_gpu(float* dev_predict, int* dev_label, int* dev_acc_binary,
int* image_shape, int the_number_of_class) {
int N = image_shape[0];
int C_label = 1, C_output = the_number_of_class;
int H = image_shape[2];
int W = image_shape[3];
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_acc << < dimGrid, dimBlock >> > (dev_predict, dev_label, dev_acc_binary, N, C_label, C_output, H, W);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function_acc_dice(float* dev_predict, int* dev_label, int* dev_predict_binary, int label,
int N, int C_label, int C_output, int H, int W) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int N_ = N*C_label*H*W;
int i, j, k, l, j_, idx_label, idx_predict, idx_max;
float tmp;
while (tid < N_)
{
		idx4d(tid, N, C_label, H, W, i, j, k, l); //decode with the label layout so the argmax below scans the classes of this pixel
for (j_ = 0; j_ < C_output; j_++)
{
idx_predict = i*(C_output*H*W) + j_*(H*W) + k*(W)+l;
if (j_ == 0) tmp = dev_predict[idx_predict], idx_max = 0;
else if (dev_predict[idx_predict] > tmp)
{
tmp = dev_predict[idx_predict];
idx_max = j_;
}
}
idx4d(tid, N, C_label, H, W, i, j, k, l);
idx_label = i*(C_label*H*W) + j*(H*W) + k*(W)+l;
if (idx_max == label) dev_predict_binary[idx_label] = 1;
else dev_predict_binary[idx_label] = 0;
if (dev_label[idx_label] != 0 && dev_label[idx_label] != 255) dev_label[idx_label] = 1;
else dev_label[idx_label] = 0;
tid += gridDim.x*blockDim.x;
}
}
void Function_acc_dice_gpu(float* dev_predict, int* dev_label, int* dev_predict_binary, int label,
int* image_shape, int the_number_of_class) {
int N = image_shape[0];
int C_label = 1, C_output = the_number_of_class;
int H = image_shape[2];
int W = image_shape[3];
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_acc_dice << < dimGrid, dimBlock >> > (dev_predict, dev_label, dev_predict_binary, label, N, C_label, C_output, H, W);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function_acc_iou(float* dev_predict, int* dev_predict_index,
int N, int C_label, int C_output, int H, int W) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int N_ = N*C_label*H*W;
int i, j, k, l, j_, idx_label, idx_predict, idx_max;
float tmp;
while (tid < N_)
{
		idx4d(tid, N, C_label, H, W, i, j, k, l); //decode with the label layout so the argmax below scans the classes of this pixel
for (j_ = 0; j_ < C_output; j_++)
{
idx_predict = i*(C_output*H*W) + j_*(H*W) + k*(W)+l;
if (j_ == 0) tmp = dev_predict[idx_predict], idx_max = 0;
else if (dev_predict[idx_predict] > tmp)
{
tmp = dev_predict[idx_predict];
idx_max = j_;
}
}
idx4d(tid, N, C_label, H, W, i, j, k, l);
idx_label = i*(C_label*H*W) + j*(H*W) + k*(W)+l;
dev_predict_index[idx_label] = idx_max;
tid += gridDim.x*blockDim.x;
}
}
void Function_acc_iou_gpu(float* dev_predict, int* dev_predict_index,
int* image_shape, int the_number_of_class) {
int N = image_shape[0];
int C_label = 1, C_output = the_number_of_class;
int H = image_shape[2];
int W = image_shape[3];
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_acc_iou << < dimGrid, dimBlock >> > (dev_predict, dev_predict_index, N, C_label, C_output, H, W);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
int** Function_confusion_matrix(/*int** confusion_matrix, */int* predict, int* gt, int size, int the_number_of_class)
{
int** confusion_matrix = new int*[the_number_of_class];
for (int i = 0; i < the_number_of_class; i++) confusion_matrix[i] = new int[the_number_of_class];
for (int i = 0; i < the_number_of_class; i++) memset(confusion_matrix[i], 0, the_number_of_class * sizeof(int));
//row(i):ground-truth image, column(j):predicted image
for (int i = 0; i < the_number_of_class; i++)
{
for (int j = 0; j < the_number_of_class; j++)
{
for (int pixel = 0; pixel < size; pixel++)
{
if (gt[pixel] != 255 && gt[pixel] != 0)
{
if (gt[pixel] == i + 1 && predict[pixel] == j + 1) confusion_matrix[i][j] += 1;
}
}
}
}
return confusion_matrix;
}
void accuracy_top5(float* x, const int size)
{
set<int> index_top5;
float temp = 0;
	int index = 0; //initialized so that all-nonpositive scores cannot leave 'index' indeterminate
for (int n = 0; n < 5; n++)
{
temp = 0;
for (int i = 0; i < size; i++)
{
if (x[i] > temp && index_top5.find(i) == index_top5.end())
{
temp = x[i];
index = i;
}
}
index_top5.insert(index);
}
set<int>::iterator iter;
for (iter = index_top5.begin(); iter != index_top5.end(); iter++)
{
cout << "index of top5 : " << *iter << ", score : " << x[*iter] * 100 << "(%)" << endl;
}
}
/*concat*/
__global__ void Kernel_Function_concatForward(float* dev_out, float* dev_x1, float* dev_x2,
int N, int C1, int C2, int H, int W)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int C = C1 + C2;
int N_max = N*C*H*W;
int i, j, k, l, idx, idx_x1, idx_x2;
while (tid < N_max)
{
idx4d(tid, N, C, H, W, i, j, k, l);
idx = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_x1 = i*(C1*H*W) + j*(H*W) + k*(W)+l;
idx_x2 = i*(C2*H*W) + (j - C1)*(H*W) + k*(W)+l;
if (j < C1)
{
dev_out[idx] = dev_x1[idx_x1];
}
else
{
dev_out[idx] = dev_x2[idx_x2];
}
tid += gridDim.x*blockDim.x;
}
}
void Function_concatForward_gpu(float* dev_out, float* dev_x1, float* dev_x2,
int N, int C1, int C2, int H, int W)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_concatForward << < dimGrid, dimBlock >> > (dev_out, dev_x1, dev_x2, N, C1, C2, H, W);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void Kernel_Function_concatBackward(float* dev_dout1, float* dev_dout2, float* dev_dout,
int N, int C1, int C2, int H, int W)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int C = C1 + C2;
int N_max = N*C*H*W;
int i, j, k, l, idx, idx_dout1, idx_dout2;
while (tid < N_max)
{
idx4d(tid, N, C, H, W, i, j, k, l);
idx = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_dout1 = i*(C1*H*W) + j*(H*W) + k*(W)+l;
idx_dout2 = i*(C2*H*W) + (j - C1)*(H*W) + k*(W)+l;
if (j < C1)
{
dev_dout1[idx_dout1] = dev_dout[idx];
}
else
{
dev_dout2[idx_dout2] = dev_dout[idx];
}
tid += gridDim.x*blockDim.x;
}
}
void Function_concatBackward_gpu(float* dev_dout1, float* dev_dout2, float* dev_dout,
int N, int C1, int C2, int H, int W)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_concatBackward << < dimGrid, dimBlock >> > (dev_dout1, dev_dout2, dev_dout, N, C1, C2, H, W);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
/*optimizer*/
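/* parameter updates:
     SGD:     p -= lr * g
     RMSProp: h = dr * h + (1 - dr) * g^2;  p -= lr * g / (sqrt(h) + 1e-7) */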
__global__ void Kernel_Function_update_sgd(float lr, float* dev_parameter, float* dev_gradient, int size) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_parameter[tid] -= lr * dev_gradient[tid];
tid += gridDim.x*blockDim.x;
}
}
void Function_update_sgd_gpu(float lr, float* dev_parameter, float* dev_gradient, int size)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_update_sgd << < dimGrid, dimBlock >> > (lr, dev_parameter, dev_gradient, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void Function_update_sgd_cpu(float lr, float* parameter, float* gradient, int size)
{
for (int i = 0; i < size; i++)
parameter[i] -= lr * gradient[i];
}
__global__ void Kernel_Function_update_rmsprop(float lr, float dr, float* dev_parameter, float* dev_gradient, float* dev_h, int size) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
dev_h[tid] *= dr;
dev_h[tid] += (1 - dr) *dev_gradient[tid] * dev_gradient[tid];
dev_parameter[tid] -= lr * dev_gradient[tid] / (sqrt(dev_h[tid]) + 1e-7);
tid += gridDim.x*blockDim.x;
}
}
void Function_update_rmsprop_gpu(float lr, float dr, float* dev_parameter, float* dev_gradient, float* dev_h, int size) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
Kernel_Function_update_rmsprop << < dimGrid, dimBlock >> > (lr, dr, dev_parameter, dev_gradient, dev_h, size);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
//////////////////////////////////////////////////////// src ver2 ////////////////////////////////////////////////////////
//new and delete
template <typename _type>
void new_cpu(_type* &src, int buffer) {
src = new _type[buffer];
memset(src, 0, buffer * sizeof(_type));
}
template <typename _type>
void delete_cpu(_type* &src) {
delete[] src;
src = NULL;
}
template <typename _type>
void new_gpu(_type* &src, int buffer) {
gpuErrchk(cudaMalloc((void**)&src, buffer * sizeof(_type)));
gpuErrchk(cudaMemset(src, 0, buffer * sizeof(_type)));
}
template <typename _type>
void delete_gpu(_type* &src) {
gpuErrchk(cudaFree(src));
src = NULL;
}
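/* allocation helpers: new_cpu/new_gpu take the pointer by reference and zero-fill the buffer,
   so callers see the freshly allocated (and cleared) storage; delete_cpu/delete_gpu free it
   and null the caller's pointer. */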
float* padding(float* x, int pad, int N, int C, int H, int W) {
int idx, idx_pad;
int H_pad = H + 2 * pad;
int W_pad = W + 2 * pad;
int buffer = N*C*H_pad*W_pad;
float* x_pad = NULL;
new_cpu<float>(x_pad, buffer);
for (int i = 0; i < N; i++) {
for (int j = 0; j < C; j++) {
for (int k = 0; k < H; k++) {
for (int l = 0; l < W; l++) {
idx = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_pad = i*(C*H_pad*W_pad) + j*(H_pad*W_pad) + (k + pad)*(W_pad)+(l + pad);
x_pad[idx_pad] = x[idx];
}
}
}
}
delete_cpu<float>(x);
return x_pad;
}
__global__ void kernel_padding_forward(float* x_pad, float* x, int pad,
int N, int C, int H, int W,
int H_pad, int W_pad) {
int i, j, k, l;
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int _N = N*C*H*W;
int idx_pad, idx;
while (tid < _N)
{
idx4d(tid, N, C, H, W, i, j, k, l);
idx_pad = i*(C*H_pad*W_pad) + j*(H_pad*W_pad) + (k + pad)*(W_pad)+(l + pad);
idx = i*(C*H*W) + j*(H*W) + k*(W)+l;
x_pad[idx_pad] = x[idx];
tid += gridDim.x*blockDim.x;
}
}
float* padding_gpu(float* x, int pad, int N, int C, int H, int W) {
int H_pad = H + 2 * pad;
int W_pad = W + 2 * pad;
int buffer = N*C*H_pad*W_pad;
float* x_pad = NULL;
new_gpu<float>(x_pad, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_padding_forward << < dimGrid, dimBlock >> > (x_pad, x, pad, N, C, H, W, H_pad, W_pad);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(x);
return x_pad;
}
float* padding(float* dx, int pad, int N, int C, int H, int W, int stride)
{
int dH = H + 2 * pad + stride - 1;
int dW = W + 2 * pad + stride - 1;
int buffer = N*C*H*W;
float* dx_pad = NULL;
new_cpu<float>(dx_pad, buffer);
int idx_dx, idx_dx_pad;
for (int i = 0; i < N; i++) {
for (int j = 0; j < C; j++) {
for (int k = 0; k < H; k++) {
for (int l = 0; l < W; l++) {
idx_dx_pad = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_dx = i*(C*dH*dW) + j*(dH*dW) + (k + pad)*(dW)+(l + pad);
dx_pad[idx_dx_pad] = dx[idx_dx];
}
}
}
}
delete_cpu<float>(dx);
return dx_pad;
}
__global__ void kernel_padding_backward(float* dx_pad, float* dx, int pad,
int N, int C, int H, int W,
int dH, int dW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int _N = N*C*H*W;
int i = 0, j = 0, k = 0, l = 0, idx_dx_pad, idx_dx;
while (tid < _N)
{
idx4d(tid, N, C, H, W, i, j, k, l);
idx_dx_pad = i*(C*H*W) + j*(H*W) + k*(W)+l;
idx_dx = i*(C*dH*dW) + j*(dH*dW) + (k + pad)*(dW)+(l + pad);
dx_pad[idx_dx_pad] = dx[idx_dx];
tid += gridDim.x*blockDim.x;
}
}
float* padding_gpu(float* dx, int pad, int N, int C, int H, int W, int stride) {
int dH = H + 2 * pad + stride - 1;
int dW = W + 2 * pad + stride - 1;
int buffer = N*C*H*W;
float* dx_pad = NULL;
new_gpu<float>(dx_pad, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_padding_backward << < dimGrid, dimBlock >> > (dx_pad, dx, pad, N, C, H, W, dH, dW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(dx);
return dx_pad;
}
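/* im2col ("stride") forward: gathers each (FH x FW) receptive field into
   col[XN][XC][FH][FW][OH][OW], reading img at a = k + m*stride, b = l + n*stride;
   stride_backward performs the matching col2im scatter-add. */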
float* stride_forward(float* img, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int buffer = XN*XC*FH*FW*OH*OW;
float* col = NULL;
new_cpu<float>(col, buffer);
int y_max, x_max;
int idx_col, idx_img;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < FH; k++) {
y_max = k + stride*OH;
for (int l = 0; l < FW; l++) {
x_max = l + stride*OW;
				for (int a = k, m = 0; a < y_max; a = a + stride, m++) {
					for (int b = l, n = 0; b < x_max; b = b + stride, n++) {
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
col[idx_col] = img[idx_img];
}
}
}
}
}
}
delete_cpu<float>(img);
return col;
}
__global__ void kernel_stride_forward(float* col, float* img, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*FH*FW*OH*OW;
int i, j, k, l, m, n, a, b;
int idx_col;
int idx_img;
while (tid < N)
{
idx6d(tid, XN, XC, FH, FW, OH, OW, i, j, k, l, m, n);
a = k + m*stride;
b = l + n*stride;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
col[idx_col] = img[idx_img];
tid += gridDim.x * blockDim.x;
}
}
float* stride_forward_gpu(float* img, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int buffer = XN*XC*FH*FW*OH*OW;
float* col = NULL;
new_gpu<float>(col, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_stride_forward << < dimGrid, dimBlock >> > (col, img, stride, XN, XC, FH, FW, OH, OW, XH, XW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(img);
return col;
}
float* stride_backward(float* col, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int buffer = XN*XC*XH*XW;
float* img = NULL;
new_cpu<float>(img, buffer);
int y_max, x_max;
int idx_img, idx_col;
for (int i = 0; i < XN; i++) {
for (int j = 0; j < XC; j++) {
for (int k = 0; k < FH; k++) {
y_max = k + stride*OH;
for (int l = 0; l < FW; l++) {
x_max = l + stride*OW;
				for (int a = k, m = 0; a < y_max; a = a + stride, m++) {
					for (int b = l, n = 0; b < x_max; b = b + stride, n++) {
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
img[idx_img] += col[idx_col];
}
}
}
}
}
}
delete_cpu<float>(col);
return img;
}
__global__ void kernel_stride_backward(float* img, float* col, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = XN*XC*XH*XW;
int i, j, a, b, idx_img, idx_col;
int k, l, m, n, temp;
while (tid < N)
{
idx4d(tid, XN, XC, XH, XW, i, j, a, b);
idx_img = i*(XC*XH*XW) + j*(XH*XW) + a*(XW)+b;
for (k = 0; k < FH && k <= a; k++)
{
m = (a - k) / stride;
temp = k + stride*m;
if (temp != a || m >= OH)
continue;
for (l = 0; l < FW && l <= b; l++)
{
n = (b - l) / stride;
temp = l + stride*n;
if (temp != b || n >= OW)
continue;
idx_col = i*(XC*FH*FW*OH*OW) + j*(FH*FW*OH*OW) + k*(FW*OH*OW) + l*(OH*OW) + m*(OW)+n;
img[idx_img] += col[idx_col];
}
}
tid += gridDim.x*blockDim.x;
}
}
float* stride_backward_gpu(float* col, int stride,
int XN, int XC, int FH, int FW, int OH, int OW, int XH, int XW) {
int buffer = XN*XC*XH*XW;
float* img = NULL;
new_gpu<float>(img, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_stride_backward << < dimGrid, dimBlock >> > (img, col, stride, XN, XC, FH, FW, OH, OW, XH, XW);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(col);
return img;
}
//dim=6
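/* generic transpose: old_idx[] holds pointers to the loop counters, and i_new..n_new alias those
   counters in the permuted order, so idx_transpose is computed without materializing a permutation. */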
float* transpose(float* x,
int _dim0, int _dim1, int _dim2, int _dim3, int _dim4, int _dim5,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3, int idx_new_dim4, int idx_new_dim5) {
int old_dims[6] = { _dim0, _dim1, _dim2, _dim3, _dim4, _dim5 };
int new_dims[6] = { 0 };
new_dims[0] = old_dims[idx_new_dim0];
new_dims[1] = old_dims[idx_new_dim1];
new_dims[2] = old_dims[idx_new_dim2];
new_dims[3] = old_dims[idx_new_dim3];
new_dims[4] = old_dims[idx_new_dim4];
new_dims[5] = old_dims[idx_new_dim5];
int i = 0, j = 0, k = 0, l = 0, m = 0, n = 0;
int* old_idx[6] = { &i, &j, &k, &l, &m, &n };
int* i_new = old_idx[idx_new_dim0];
int* j_new = old_idx[idx_new_dim1];
int* k_new = old_idx[idx_new_dim2];
int* l_new = old_idx[idx_new_dim3];
int* m_new = old_idx[idx_new_dim4];
int* n_new = old_idx[idx_new_dim5];
int buffer = _dim0*_dim1*_dim2*_dim3*_dim4*_dim5;
float* x_transpose = NULL;
new_cpu<float>(x_transpose, buffer);
int idx, idx_transpose;
for (i = 0; i < _dim0; i++) {
for (j = 0; j < _dim1; j++) {
for (k = 0; k < _dim2; k++) {
for (l = 0; l < _dim3; l++) {
for (m = 0; m < _dim4; m++) {
for (n = 0; n < _dim5; n++) {
idx_transpose = (*i_new) * (new_dims[1] * new_dims[2] * new_dims[3] * new_dims[4] * new_dims[5])
+ (*j_new) * (new_dims[2] * new_dims[3] * new_dims[4] * new_dims[5])
+ (*k_new) * (new_dims[3] * new_dims[4] * new_dims[5])
+ (*l_new) * (new_dims[4] * new_dims[5])
+ (*m_new) * (new_dims[5])
+ (*n_new);
idx = i*(_dim1*_dim2*_dim3*_dim4*_dim5) + j*(_dim2*_dim3*_dim4*_dim5) + k*(_dim3*_dim4*_dim5) + l*(_dim4*_dim5) + m*(_dim5)+n;
x_transpose[idx_transpose] = x[idx];
}
}
}
}
}
}
delete_cpu<float>(x);
return x_transpose;
}
__global__ void kernel_transpose_6(float* x_transpose, float* x,
int _dim0, int _dim1, int _dim2, int _dim3, int _dim4, int _dim5,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3, int idx_new_dim4, int idx_new_dim5) {
int old_dims[6] = { _dim0, _dim1, _dim2, _dim3, _dim4, _dim5 };
int new_dims[6] = { 0 };
new_dims[0] = old_dims[idx_new_dim0];
new_dims[1] = old_dims[idx_new_dim1];
new_dims[2] = old_dims[idx_new_dim2];
new_dims[3] = old_dims[idx_new_dim3];
new_dims[4] = old_dims[idx_new_dim4];
new_dims[5] = old_dims[idx_new_dim5];
int i = 0, j = 0, k = 0, l = 0, m = 0, n = 0;
int* old_idx[6] = { &i, &j, &k, &l, &m, &n };
int* i_new = old_idx[idx_new_dim0];
int* j_new = old_idx[idx_new_dim1];
int* k_new = old_idx[idx_new_dim2];
int* l_new = old_idx[idx_new_dim3];
int* m_new = old_idx[idx_new_dim4];
int* n_new = old_idx[idx_new_dim5];
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = _dim0*_dim1*_dim2*_dim3*_dim4*_dim5;
int idx_transpose;
int idx;
while (tid < N)
{
idx6d(tid, _dim0, _dim1, _dim2, _dim3, _dim4, _dim5, i, j, k, l, m, n);
idx_transpose = (*i_new) * (new_dims[1] * new_dims[2] * new_dims[3] * new_dims[4] * new_dims[5])
+ (*j_new) * (new_dims[2] * new_dims[3] * new_dims[4] * new_dims[5])
+ (*k_new) * (new_dims[3] * new_dims[4] * new_dims[5])
+ (*l_new) * (new_dims[4] * new_dims[5])
+ (*m_new) * (new_dims[5])
+ (*n_new);
idx = i*(_dim1*_dim2*_dim3*_dim4*_dim5) + j*(_dim2*_dim3*_dim4*_dim5) + k*(_dim3*_dim4*_dim5) + l*(_dim4*_dim5) + m*(_dim5)+n;
x_transpose[idx_transpose] = x[idx];
tid += gridDim.x *blockDim.x;
}
}
float* transpose_gpu(float* x,
int _dim0, int _dim1, int _dim2, int _dim3, int _dim4, int _dim5,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3, int idx_new_dim4, int idx_new_dim5) {
int buffer = _dim0*_dim1*_dim2*_dim3*_dim4*_dim5;
float* x_transpose = NULL;
new_gpu<float>(x_transpose, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_transpose_6 << < dimGrid, dimBlock >> > (x_transpose, x,
_dim0, _dim1, _dim2, _dim3, _dim4, _dim5,
idx_new_dim0, idx_new_dim1, idx_new_dim2, idx_new_dim3, idx_new_dim4, idx_new_dim5);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(x);
return x_transpose;
}
//dim=4
float* transpose(float* x,
int _dim0, int _dim1, int _dim2, int _dim3,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3) {
int old_dims[4] = { _dim0, _dim1, _dim2, _dim3 };
int new_dims[4] = { 0 };
new_dims[0] = old_dims[idx_new_dim0];
new_dims[1] = old_dims[idx_new_dim1];
new_dims[2] = old_dims[idx_new_dim2];
new_dims[3] = old_dims[idx_new_dim3];
int i = 0, j = 0, k = 0, l = 0;
int* old_idx[4] = { &i, &j, &k, &l };
int* i_new = old_idx[idx_new_dim0];
int* j_new = old_idx[idx_new_dim1];
int* k_new = old_idx[idx_new_dim2];
int* l_new = old_idx[idx_new_dim3];
int buffer = _dim0*_dim1*_dim2*_dim3;
float* x_transpose = NULL;
new_cpu<float>(x_transpose, buffer);
int idx, idx_transpose;
for (i = 0; i < _dim0; i++) {
for (j = 0; j < _dim1; j++) {
for (k = 0; k < _dim2; k++) {
for (l = 0; l < _dim3; l++) {
idx_transpose = (*i_new) * (new_dims[1] * new_dims[2] * new_dims[3])
+ (*j_new) * (new_dims[2] * new_dims[3])
+ (*k_new) * (new_dims[3])
+ (*l_new);
idx = i*(_dim1*_dim2*_dim3) + j*(_dim2*_dim3) + k*(_dim3)+l;
x_transpose[idx_transpose] = x[idx];
}
}
}
}
delete_cpu<float>(x);
return x_transpose;
}
__global__ void kernel_transpose_4(float* x_transpose, float* x,
int _dim0, int _dim1, int _dim2, int _dim3,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3) {
	int old_dims[4] = { _dim0, _dim1, _dim2, _dim3 };
	int new_dims[4] = { 0 };
new_dims[0] = old_dims[idx_new_dim0];
new_dims[1] = old_dims[idx_new_dim1];
new_dims[2] = old_dims[idx_new_dim2];
new_dims[3] = old_dims[idx_new_dim3];
int i = 0, j = 0, k = 0, l = 0;
	int* old_idx[4] = { &i, &j, &k, &l };
int* i_new = old_idx[idx_new_dim0];
int* j_new = old_idx[idx_new_dim1];
int* k_new = old_idx[idx_new_dim2];
int* l_new = old_idx[idx_new_dim3];
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = _dim0*_dim1*_dim2*_dim3;
int idx_transpose;
int idx;
while (tid < N)
{
idx4d(tid, _dim0, _dim1, _dim2, _dim3, i, j, k, l);
idx_transpose = (*i_new) * (new_dims[1] * new_dims[2] * new_dims[3])
+ (*j_new) * (new_dims[2] * new_dims[3])
+ (*k_new) * (new_dims[3])
+ (*l_new);
idx = i*(_dim1*_dim2*_dim3) + j*(_dim2*_dim3) + k*(_dim3)+l;
x_transpose[idx_transpose] = x[idx];
tid += gridDim.x *blockDim.x;
}
}
float* transpose_gpu(float* x,
int _dim0, int _dim1, int _dim2, int _dim3,
int idx_new_dim0, int idx_new_dim1, int idx_new_dim2, int idx_new_dim3) {
int buffer = _dim0*_dim1*_dim2*_dim3;
float* x_transpose = NULL;
new_gpu<float>(x_transpose, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_transpose_4 << < dimGrid, dimBlock >> > (x_transpose, x,
_dim0, _dim1, _dim2, _dim3,
idx_new_dim0, idx_new_dim1, idx_new_dim2, idx_new_dim3);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(x);
return x_transpose;
}
//dim=2
float* transpose(float* x,
int _dim0, int _dim1) {
int buffer = _dim0*_dim1;
float* x_transpose = NULL;
new_cpu<float>(x_transpose, buffer);
int idx, idx_transpose;
for (int i = 0; i < _dim0; i++) {
for (int j = 0; j < _dim1; j++) {
idx = i*_dim1 + j;
idx_transpose = j*_dim0 + i;
x_transpose[idx_transpose] = x[idx];
}
}
delete_cpu<float>(x);
return x_transpose;
}
__global__ void kernel_transpose_2(float* x_transpose, float* x,
int _dim0, int _dim1) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= _dim0 || j >= _dim1) return;
int idx, idx_transpose;
idx = i*_dim1 + j;
idx_transpose = j*_dim0 + i;
x_transpose[idx_transpose] = x[idx];
}
float* transpose_gpu(float* x,
int _dim0, int _dim1) {
int buffer = _dim0*_dim1;
float* x_transpose = NULL;
new_gpu<float>(x_transpose, buffer);
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid((_dim0 + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (_dim1 + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y);
kernel_transpose_2 << < dimGrid, dimBlock >> > (x_transpose, x, _dim0, _dim1);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(x);
return x_transpose;
}
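/* naive matrix multiply out[r x c] = A[r x n] * B[n x c]: one output element per thread with a
   grid-stride loop; no shared-memory tiling is used here. */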
float* dot(float* A, float* B,
int r, int c, int n) {
int buffer = r*c;
float* out = NULL;
new_cpu<float>(out, buffer);
float temp;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
temp = 0.0;
for (int k = 0; k < n; k++) {
temp += A[i*n + k] * B[k*c + j];
}
out[i*c + j] = temp;
}
}
return out;
}
__global__ void kernel_dot(float* out, float* A, float* B,
int r, int c, int n) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int N = r*c;
int i, j;
float temp, A_val, B_val;
while (tid < N)
{
temp = 0.0;
A_val = 0.0;
B_val = 0.0;
idx2d(tid, r, c, i, j);
for (int k = 0; k < n; k++) {
A_val = A[i*n + k];
B_val = B[k*c + j];
temp += A_val*B_val;
}
out[i*c + j] = temp;
tid += gridDim.x*blockDim.x;
}
}
float* dot_gpu(float* A, float* B,
int r, int c, int n) {
int buffer = r*c;
float* out = NULL;
new_gpu<float>(out, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_dot << < dimGrid, dimBlock >> > (out, A, B, r, c, n);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
return out;
}
void _dot(float* out, float* A, float* B,
int r, int c, int n) {
float temp;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
temp = 0.0;
for (int k = 0; k < n; k++) {
temp += A[i*n + k] * B[k*c + j];
}
out[i*c + j] = temp;
}
}
}
void _dot_gpu(float* out, float* A, float* B,
int r, int c, int n) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_dot << < dimGrid, dimBlock >> > (out, A, B, r, c, n);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void sum_forward(float* x, float* b,
int r, int c) {
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
x[i*c + j] += b[j];
}
}
}
__global__ void kernel_sum_forward(float* x, float* b,
int r, int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r*c;
int i = 0, j = 0;
while (tid < N)
{
idx2d(tid, r, c, i, j);
x[i*c + j] += b[j];
tid += gridDim.x*blockDim.x;
}
}
void sum_forward_gpu(float* x, float* b,
int r, int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_sum_forward << < dimGrid, dimBlock >> > (x, b, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void sum_backward(float* db, float* dout,
int r, int c) {
memset(db, 0, c * sizeof(float));
for (int j = 0; j < c; j++) {
for (int i = 0; i < r; i++) {
db[j] += dout[i*c + j];
}
}
}
__global__ void kernel_sum_backward(float* db, float* dout,
int r, int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = c;
while (tid < N)
{
for (int i = 0; i < r; i++) {
db[tid] += dout[i*c + tid];
}
tid += gridDim.x*blockDim.x;
}
}
void sum_backward_gpu(float* db, float* dout,
int r, int c) {
cudaMemset(db, 0, c * sizeof(float));
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_sum_backward << < dimGrid, dimBlock >> > (db, dout, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
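/* two-stage column sum with shared memory (used by the optimized sum_backward_gpu below):
   kernel_sum_backward_opt1 lets each block reduce 2*blockDim.x rows of one column into a partial
   sum, unrolling the last warp via warpReduce, and Kernel_Sum_backward_opt2 adds the partials. */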
template <unsigned int blockSize>
__device__ void warpReduce(volatile float* sdata, int tid)
{
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
__global__ void kernel_sum_backward_opt1(float* sum, float* dout, int r, int c) {
__shared__ float sdata[(BLOCK_SIZE_opt / 2)];
unsigned int tid = threadIdx.x;
unsigned int i = (blockDim.x * 2) * blockIdx.x + threadIdx.x;
	//if (i >= r) return; //an early return would bypass __syncthreads() below; r is assumed to be a multiple of BLOCK_SIZE_opt
for (int j = 0; j < c; j++) {
sdata[tid] = dout[i*c + j] + dout[(i + blockDim.x)*c + j];
__syncthreads();
if (blockDim.x >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
if (tid < 32) warpReduce<BLOCK_SIZE_opt / 2>(sdata, tid);
if (tid == 0) sum[blockIdx.x*c + j] = sdata[0];
__syncthreads();
}
}
__global__ void Kernel_Sum_backward_opt2(float* db, float* sum, int r_sum, int c) {
unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= c) return;
float temp = 0;
for (int i = 0; i < r_sum; i++) {
temp += sum[i*c + j];
}
db[j] = temp;
}
void sum_backward_gpu(float* db, float* dout,
int r, int c, bool use_sharedMemory)
{
int buffer = (r + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt * c;
float* sum = NULL;
new_gpu<float>(sum, buffer);
dim3 dimBlock1(BLOCK_SIZE_opt / 2); //halve the number of threads
dim3 dimGrid1((r + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt);
kernel_sum_backward_opt1 << < dimGrid1, dimBlock1 >> > (sum, dout, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
int r_sum = buffer / c;
dim3 dimBlock2(BLOCK_SIZE_opt);
dim3 dimGrid2((c + BLOCK_SIZE_opt - 1) / BLOCK_SIZE_opt);
Kernel_Sum_backward_opt2 << < dimGrid2, dimBlock2 >> > (db, sum, r_sum, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(sum);
}
//argMax is taken by reference so the buffer allocated by new_cpu reaches the caller
float* max_poolingForward(int* &argMax, float* col,
int r, int c)
{
int buffer = r;
float* out = NULL;
new_cpu<float>(out, buffer);
new_cpu<int>(argMax, buffer);
float temp;
int idx;
for (int i = 0; i < r; i++) {
idx = 0;
temp = col[i*c + 0];
for (int j = 1; j < c; j++) {
if (col[i*c + j] > temp) {
temp = col[i*c + j];
idx = j;
}
}
argMax[i] = idx;
out[i] = temp;
}
delete_cpu<float>(col);
return out;
}
float* max_poolingForward(float* col,
int r, int c)
{
int buffer = r;
float* out = NULL;
new_cpu<float>(out, buffer);
float temp;
for (int i = 0; i < r; i++) {
temp = col[i*c + 0];
for (int j = 1; j < c; j++) {
if (col[i*c + j] > temp) {
temp = col[i*c + j];
}
}
out[i] = temp;
}
delete_cpu<float>(col);
return out;
}
float* avg_poolingForward(float* col,
int r, int c)
{
int buffer = r;
float* out = NULL;
new_cpu<float>(out, buffer);
float sum;
for (int i = 0; i < r; i++) {
sum = 0.0;
for (int j = 0; j < c; j++) {
sum += col[i*c + j];
}
out[i] = sum / c;
}
delete_cpu<float>(col);
return out;
}
float* max_poolingBackward(int* argMax, float* dout,
int r, int c) {
int buffer = r*c;
float* dcol = NULL;
new_cpu<float>(dcol, buffer);
for (int i = 0; i < r; i++) {
dcol[i*c + argMax[i]] = dout[i];
}
delete_cpu<float>(dout);
delete_cpu<int>(argMax);
return dcol;
}
float* avg_poolingBackward(float* dout,
int r, int c)
{
int buffer = r*c;
float* dcol = NULL;
new_cpu<float>(dcol, buffer);
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
dcol[i*c + j] = dout[i] / c;
}
}
delete_cpu<float>(dout);
return dcol;
}
__global__ void kernel_max_poolingForward_training(float* out, int* argMax, float* col,
int r, int c) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
int idx;
float temp;
while (i < N)
{
temp = col[i*c + 0];
idx = 0;
for (int j = 1; j < c; j++) {
if (col[i*c + j] > temp) {
temp = col[i*c + j];
idx = j;
}
}
argMax[i] = idx;
out[i] = temp;
i += gridDim.x*blockDim.x;
}
}
//argMax is taken by reference: new_gpu assigns a fresh device buffer that the caller needs for the backward pass
float* max_poolingForward_gpu(int* &argMax, float* col,
int r, int c) {
int buffer = r;
float* out = NULL;
new_gpu<float>(out, buffer);
new_gpu<int>(argMax, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_max_poolingForward_training << < dimGrid, dimBlock >> > (out, argMax, col, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(col);
return out;
}
__global__ void kernel_max_poolingForward_inference(float* out, float* col,
int r, int c) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
float temp;
while (i < N)
{
temp = col[i*c + 0];
for (int j = 1; j < c; j++) {
if (col[i*c + j] > temp) {
temp = col[i*c + j];
}
}
out[i] = temp;
i += gridDim.x*blockDim.x;
}
}
float* max_poolingForward_gpu(float* col,
int r, int c) {
int buffer = r;
float* out = NULL;
new_gpu<float>(out, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_max_poolingForward_inference << < dimGrid, dimBlock >> > (out, col, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(col);
return out;
}
__global__ void kernel_avg_poolingForward(float* out, float* col,
int r, int c) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
int N = r;
float sum;
while (i < N)
{
sum = 0.0;
for (int j = 0; j < c; j++) {
sum += col[i*c + j];
}
out[i] = sum / c;
i += gridDim.x*blockDim.x;
}
}
float* avg_poolingForward_gpu(float* col,
int r, int c) {
int buffer = r;
float* out = NULL;
new_gpu<float>(out, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_avg_poolingForward << < dimGrid, dimBlock >> > (out, col, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(col);
return out;
}
__global__ void kernel_max_poolingBackward(float* dcol, int* argMax, float* dout,
int r, int c) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = r*c;
int i, j;
while (tid < N)
{
idx2d(tid, r, c, i, j);
dcol[i*c + j] = 0;
dcol[i*c + (argMax[i])] = dout[i];
tid += gridDim.x*blockDim.x;
}
}
float* max_poolingBackward_gpu(int* argMax, float* dout,
int r, int c) {
int buffer = r*c;
float* dcol = NULL;
new_gpu<float>(dcol, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_max_poolingBackward << < dimGrid, dimBlock >> > (dcol, argMax, dout, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(dout);
delete_gpu<int>(argMax);
return dcol;
}
__global__ void kernel_avg_poolingBackward(float* dcol, float* dout,
int r, int c) {
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = r*c;
int i, j;
while (tid < N)
{
idx2d(tid, r, c, i, j);
dcol[i*c + j] = dout[i] / c;
tid += gridDim.x*blockDim.x;
}
}
float* avg_poolingBackward_gpu(float* dout,
int r, int c) {
int buffer = r*c;
float* dcol = NULL;
new_gpu<float>(dcol, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_avg_poolingBackward << < dimGrid, dimBlock >> > (dcol, dout, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<float>(dout);
return dcol;
}
__global__ void kernel_reluForward_training(float* x, int* index, int size, float negative_slope) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
if (x[tid] > 0) index[tid] = 1;
else x[tid] *= negative_slope;
tid += gridDim.x*blockDim.x;
}
}
__global__ void kernel_reluForward_inference(float* x, int size, float negative_slope) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
if (x[tid] <= 0) x[tid] *= negative_slope;
tid += gridDim.x*blockDim.x;
}
}
void reluForward_gpu(float* x, int* index, int size, float negative_slope) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
int buffer = size;
new_gpu<int>(index, buffer);
kernel_reluForward_training << < dimGrid, dimBlock >> > (x, index, size, negative_slope);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
void reluForward_gpu(float* x, int size, float negative_slope) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_reluForward_inference << < dimGrid, dimBlock >> > (x, size, negative_slope);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
__global__ void kernel_reluBackward(float* dout, int* index, int size, float negative_slope) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int N = size;
while (tid < N)
{
if (!index[tid]) dout[tid] *= negative_slope;
tid += gridDim.x*blockDim.x;
}
}
void reluBackward_gpu(float* dout, int* index, int size, float negative_slope) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_reluBackward << < dimGrid, dimBlock >> > (dout, index, size, negative_slope);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
delete_gpu<int>(index);
}
void softmax(float* x,
int r, int c) {
float temp1, temp2;
for (int i = 0; i < r; i++) {
temp1 = 0.;
temp2 = 0.;
for (int j = 0; j < c; j++)
{
temp1 = max(x[i*c + j], temp1);
}
for (int j = 0; j < c; j++)
{
x[i*c + j] = expf(x[i*c + j] - temp1);
temp2 += x[i*c + j];
}
for (int j = 0; j < c; j++) x[i*c + j] /= temp2;
}
}
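// Numerical-stability note: subtracting the row maximum before exponentiation
// leaves the result unchanged, since exp(x_j - m) / sum_k exp(x_k - m) equals
// exp(x_j) / sum_k exp(x_k) for any constant m, but it keeps expf() from
// overflowing when the inputs are large.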
__global__ void kernel_softmax(float* x, int r, int c) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= r) return;
float temp1 = 0., temp2 = 0.;
for (int j = 0; j < c; j++) temp1 = max(x[i*c + j], temp1);
for (int j = 0; j < c; j++) {
x[i*c + j] = expf(x[i*c + j] - temp1);
temp2 += x[i*c + j];
}
for (int j = 0; j < c; j++) x[i*c + j] /= temp2;
}
void softmax_gpu(float* x,
int r, int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((r + BLOCK_SIZE - 1) / BLOCK_SIZE);
if (dimGrid.x > MAX_GRID_SIZE) {
cout << "dimension of Grid exceeds " << MAX_GRID_SIZE << " in 'Softmax_gpu'!" << endl;
}
kernel_softmax << < dimGrid, dimBlock >> > (x, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
}
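// Cross-entropy with one-hot targets: loss = -(1/r) * sum_i log(p_i[t_i] + 1e-7),
// where t_i is the index of the 1 in row i of t; the 1e-7 epsilon guards
// against log(0) when a predicted probability underflows to zero.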
float CEE(float* x, int* t,
int r, int c) {
float temp = 0;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
if (t[i*c + j] == 1) { //one-hot encoding
temp += log(x[i*c + j] + 1e-7);
continue;
}
}
}
temp /= -r;
return temp;
}
__global__ void kernel_CEE(float* x, int* t, float* loss,
int r, int c) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
int N = r;
float temp;
while (i < N)
{
for (int j = 0; j < c; j++) {
if (t[i*c + j] == 1) {
temp = logf(x[i*c + j] + 1e-7);
atomicAdd(loss, temp);
continue;
}
}
i += gridDim.x*blockDim.x;
}
}
float CEE_gpu(float* x, int* t, float* loss,
int r, int c) {
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
cudaMemset(loss, 0, sizeof(float));
kernel_CEE << < dimGrid, dimBlock >> > (x, t, loss, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
float _loss = 0;
cudaMemcpy(&_loss, loss, sizeof(float), cudaMemcpyDeviceToHost);
_loss /= -r;
return _loss;
}
__global__ void kernel_softmaxBackward(float* dx, float* y, int* t,
int r, int c) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int N = r*c;
while (tid < N)
{
dx[tid] = (y[tid] - t[tid]) / r;
tid += gridDim.x*blockDim.x;
}
}
float* softmaxBackward_gpu(float* y, int* t,
int r, int c) {
int buffer = r*c;
float* dx = NULL;
new_gpu<float>(dx, buffer);
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE);
kernel_softmaxBackward << < dimGrid, dimBlock >> > (dx, y, t, r, c);
cudaDeviceSynchronize();
gpuErrchk(cudaGetLastError());
return dx;
} |
0761e131233014dfa24864c4fc2f9895689882e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** Atomic operations
 * When many threads access the same memory region at the same time, especially
 * for writes, dangerous situations can easily arise.
 * An atomic operation is a group of operations completed as an indivisible whole
 * that cannot be interrupted by other threads.
 * The book "Advanced Programming in the UNIX Environment" covers atomic
 * operations in detail.
 * Without such atomic operations, unknown and uncontrollable results can occur.
 */
#include <stdio.h>
#define NUM_THREADS 10000
#define SIZE 10
#define BLOCK_WIDTH 100
__global__ void gpu_increment_without_atomic(int *d_a)
{
// Calculate thread id for current thread
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// each thread increments elements wrapping at SIZE variable
tid = tid % SIZE;
d_a[tid] += 1;
}
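// Illustrative counterpart (a sketch, not part of the original program): with
// atomicAdd the read-modify-write is serialized per address, so every increment
// is counted, whereas the plain "d_a[tid] += 1" above lets concurrent updates
// overwrite each other and the final counts come out short.
__global__ void gpu_increment_with_atomic(int *d_a)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
tid = tid % SIZE;
atomicAdd(&d_a[tid], 1);
}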
int main(int argc, char **argv)
{
printf("%d total threads in %d blocks writing into %d array elements\n", NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, SIZE);
// declare and allocate host memory
int h_a[SIZE];
const int ARRAY_BYTES = SIZE * sizeof(int);
// declare and allocate GPU memory
int * d_a;
hipMalloc((void **)&d_a, ARRAY_BYTES);
//Initialize GPU memory to zero
hipMemset((void *)d_a, 0, ARRAY_BYTES);
gpu_increment_without_atomic << <NUM_THREADS / BLOCK_WIDTH, BLOCK_WIDTH >> >(d_a);
// copy back the array to host memory
hipMemcpy(h_a, d_a, ARRAY_BYTES, hipMemcpyDeviceToHost);
printf("Number of times a particular Array index has been incremented without atomic add is: \n");
for (int i = 0; i < SIZE; i++)
{
printf("index: %d --> %d times\n ", i, h_a[i]);
}
hipFree(d_a);
return 0;
} | 0761e131233014dfa24864c4fc2f9895689882e3.cu | /** Atomic operations
 * When many threads access the same memory region at the same time, especially
 * for writes, dangerous situations can easily arise.
 * An atomic operation is a group of operations completed as an indivisible whole
 * that cannot be interrupted by other threads.
 * The book "Advanced Programming in the UNIX Environment" covers atomic
 * operations in detail.
 * Without such atomic operations, unknown and uncontrollable results can occur.
 */
#include <stdio.h>
#define NUM_THREADS 10000
#define SIZE 10
#define BLOCK_WIDTH 100
__global__ void gpu_increment_without_atomic(int *d_a)
{
// Calculate thread id for current thread
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// each thread increments elements wrapping at SIZE variable
tid = tid % SIZE;
d_a[tid] += 1;
}
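// Illustrative counterpart (a sketch, not part of the original program): with
// atomicAdd the read-modify-write is serialized per address, so every increment
// is counted, whereas the plain "d_a[tid] += 1" above lets concurrent updates
// overwrite each other and the final counts come out short.
__global__ void gpu_increment_with_atomic(int *d_a)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
tid = tid % SIZE;
atomicAdd(&d_a[tid], 1);
}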
int main(int argc, char **argv)
{
printf("%d total threads in %d blocks writing into %d array elements\n", NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, SIZE);
// declare and allocate host memory
int h_a[SIZE];
const int ARRAY_BYTES = SIZE * sizeof(int);
// declare and allocate GPU memory
int * d_a;
cudaMalloc((void **)&d_a, ARRAY_BYTES);
//Initialize GPU memory to zero
cudaMemset((void *)d_a, 0, ARRAY_BYTES);
gpu_increment_without_atomic << <NUM_THREADS / BLOCK_WIDTH, BLOCK_WIDTH >> >(d_a);
// copy back the array to host memory
cudaMemcpy(h_a, d_a, ARRAY_BYTES, cudaMemcpyDeviceToHost);
printf("Number of times a particular Array index has been incremented without atomic add is: \n");
for (int i = 0; i < SIZE; i++)
{
printf("index: %d --> %d times\n ", i, h_a[i]);
}
cudaFree(d_a);
return 0;
} |
ab9af7fef924d0234a3b6e0017f0b04638aa6eac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <cstdlib>
#include <cmath>
#define TILE_WIDTH 32
void init_mat(int size_i, int size_j, float*& A) {
A = new float[size_i * size_j];
srand(time(NULL));
for (int i = 0; i < size_i; i++) {
for (int j = 0; j < size_j; j++) {
A[i * size_j + j] = rand() % 1000;
}
}
}
// M = m x n, N = n x p, P = m x p
__global__ void MatrixMulKernel(float* M, float* N, float* P, int m, int n, int p) {
// Calculate row index of the P element and M
int row = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate column index of P and N
int col = blockIdx.x * blockDim.x + threadIdx.x;
if ((row < m) && (col < p)) {
float Pvalue = 0;
// each thread computes one element of the block sub-mat
for (int k = 0; k < n; k++) {
Pvalue += M[row * n + k] * N[k * p + col];
}
P[row * p + col] = Pvalue;
}
}
// M = m x n, N = n x p, P = m x p
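// Shared-memory tiling: each block stages TILE_WIDTH x TILE_WIDTH tiles of M and
// N in on-chip shared memory, so every global-memory element is loaded once per
// tile instead of once per output element, which cuts global-memory traffic
// roughly TILE_WIDTH-fold relative to the naive kernel above.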
__global__ void MatrixMulTilingKernel(float* d_M, float* d_N, float* d_P, int M, int N, int P) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
//Identify row and column of the d_P element to work on
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over d_M and d_N tiles required to compute d_P element
for (int ph = 0; ph < (N-1) / TILE_WIDTH + 1; ph++) {
// Collaborative loading of d_M and d_N tiles into shared mem
if ((row < M) && (ph * TILE_WIDTH + tx) < N)
Mds[ty][tx] = d_M[(row * N) + (ph * TILE_WIDTH) + tx];
else Mds[ty][tx] = 0.0f;
if ((ph * TILE_WIDTH + ty) < N && col < P)
Nds[ty][tx] = d_N[(ph * TILE_WIDTH + ty) * P + col];
else Nds[ty][tx] = 0.0f;
__syncthreads();
for (int k = 0; k < TILE_WIDTH; k++) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ( (row < M) && (col < P) ) d_P[row * P + col] = Pvalue;
}
// M = m x n, N = n x p, P = m x p
void hostMatrixMult(float* M, float* N, float* P, int m, int n, int p, int kernel_func = 0) {
float* d_M, * d_N, * d_P;
hipMalloc((void**)&d_M, (m*n) * sizeof(float));
hipMalloc((void**)&d_N, (n*p) * sizeof(float));
hipMalloc((void**)&d_P, (m*p) * sizeof(float));
hipMemcpy(d_M, M, (m*n) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_N, N, (n*p) * sizeof(float), hipMemcpyHostToDevice);
/*
kernel call
*/
if (kernel_func == 0) {
// regular multiplication
//dim3 dimGrid(ceil(width / 1024.0), 1, 1);
//dim3 dimBlock(1024, 1, 1);
dim3 dimGrid(ceil(m / 32.0), ceil(p / 32.0), 1);
dim3 dimBlock(32, 32, 1);
MatrixMulKernel << < dimGrid, dimBlock >> > (d_M, d_N, d_P, m, n, p);
}
else if (kernel_func == 1) {
// tiling multiplication
std::cout << "Tiling\n";
dim3 dimGrid(ceil(m / 32.0), ceil(p/32.0), 1);
dim3 dimBlock(32, 32, 1);
int width = p;
MatrixMulTilingKernel << < dimGrid, dimBlock >> > (d_M, d_N, d_P, m, n, p);
}
hipMemcpy(P, d_P, (m*p) * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_M);
hipFree(d_N);
hipFree(d_P);
}
int main()
{
// old portion of code
// used when algorithms only worked with squared matrices
/*int sizes[] = { 1024, 2048, 4096, 8192, 16384};
for (int i = 0; i < 5; i++) {
std::cout << "Size: " << sizes[i] << std::endl;
float* A, * B;
init_mat(sizes[i], A);
init_mat(sizes[i], B);
float* output = new float[sizes[i] * sizes[i]];
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hostMatrixMult(A, B, output, sizes[i],0);
hipEventRecord(stop);
hipEventSynchronize(stop);
float elapsed_time = 0;
hipEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << 0 << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
init_mat(sizes[i], A);
init_mat(sizes[i], B);
output = new float[sizes[i] * sizes[i]];
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hostMatrixMult(A, B, output, sizes[i], 1);
hipEventRecord(stop);
hipEventSynchronize(stop);
elapsed_time = 0;
hipEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << 1 << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
}
*/
int sizes_i[] = { 4096, 16384 };
int sizes_j[] = { 1024, 2048, 8192 };
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 3; j++) {
std::cout << "Size i: " << sizes_i[i] << std::endl;
std::cout << "Size j: " << sizes_j[j] << std::endl;
float* A, * B;
init_mat(sizes_i[i], sizes_j[j], A); // A = i x j
init_mat(sizes_j[j], sizes_i[i], B); // B = j x i
float* output = new float[sizes_i[i] * sizes_i[i]]; // C = i x i
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hostMatrixMult(A, B, output, sizes_i[i], sizes_j[j], sizes_i[i], 0);
hipEventRecord(stop);
hipEventSynchronize(stop);
float elapsed_time = 0;
hipEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << 0 << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
init_mat(sizes_i[i], sizes_j[j], A); // A = i x j
init_mat(sizes_j[j], sizes_i[i], B); // B = j x i
output = new float[sizes_i[i] * sizes_i[i]]; // C = i x i
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hostMatrixMult(A, B, output, sizes_i[i], sizes_j[j], sizes_i[i], 1);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << 1 << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
}
}
return 0;
} | ab9af7fef924d0234a3b6e0017f0b04638aa6eac.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <cstdlib>
#include <cmath>
#define TILE_WIDTH 32
void init_mat(int size_i, int size_j, float*& A) {
A = new float[size_i * size_j];
srand(time(NULL));
for (int i = 0; i < size_i; i++) {
for (int j = 0; j < size_j; j++) {
A[i * size_j + j] = rand() % 1000;
}
}
}
// M = m x n, N = n x p, P = m x p
__global__ void MatrixMulKernel(float* M, float* N, float* P, int m, int n, int p) {
// Calculate row index of the P element and M
int row = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate column index of P and N
int col = blockIdx.x * blockDim.x + threadIdx.x;
if ((row < m) && (col < p)) {
float Pvalue = 0;
// each thread computes one element of the block sub-mat
for (int k = 0; k < n; k++) {
Pvalue += M[row * n + k] * N[k * p + col];
}
P[row * p + col] = Pvalue;
}
}
// M = m x n, N = n x p, P = m x p
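// Shared-memory tiling: each block stages TILE_WIDTH x TILE_WIDTH tiles of M and
// N in on-chip shared memory, so every global-memory element is loaded once per
// tile instead of once per output element, which cuts global-memory traffic
// roughly TILE_WIDTH-fold relative to the naive kernel above.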
__global__ void MatrixMulTilingKernel(float* d_M, float* d_N, float* d_P, int M, int N, int P) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
//Identify row and column of the d_P element to work on
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over d_M and d_N tiles required to compute d_P element
for (int ph = 0; ph < (N-1) / TILE_WIDTH + 1; ph++) {
// Collaborative loading of d_M and d_N tiles into shared mem
if ((row < M) && (ph * TILE_WIDTH + tx) < N)
Mds[ty][tx] = d_M[(row * N) + (ph * TILE_WIDTH) + tx];
else Mds[ty][tx] = 0.0f;
if ((ph * TILE_WIDTH + ty) < N && col < P)
Nds[ty][tx] = d_N[(ph * TILE_WIDTH + ty) * P + col];
else Nds[ty][tx] = 0.0f;
__syncthreads();
for (int k = 0; k < TILE_WIDTH; k++) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ( (row < M) && (col < P) ) d_P[row * P + col] = Pvalue;
}
// M = m x n, N = n x p, P = m x p
void hostMatrixMult(float* M, float* N, float* P, int m, int n, int p, int kernel_func = 0) {
float* d_M, * d_N, * d_P;
cudaMalloc((void**)&d_M, (m*n) * sizeof(float));
cudaMalloc((void**)&d_N, (n*p) * sizeof(float));
cudaMalloc((void**)&d_P, (m*p) * sizeof(float));
cudaMemcpy(d_M, M, (m*n) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_N, N, (n*p) * sizeof(float), cudaMemcpyHostToDevice);
/*
kernel call
*/
if (kernel_func == 0) {
// regular multiplication
//dim3 dimGrid(ceil(width / 1024.0), 1, 1);
//dim3 dimBlock(1024, 1, 1);
dim3 dimGrid(ceil(m / 32.0), ceil(p / 32.0), 1);
dim3 dimBlock(32, 32, 1);
MatrixMulKernel << < dimGrid, dimBlock >> > (d_M, d_N, d_P, m, n, p);
}
else if (kernel_func == 1) {
// tiling multiplication
std::cout << "Tiling\n";
dim3 dimGrid(ceil(m / 32.0), ceil(p/32.0), 1);
dim3 dimBlock(32, 32, 1);
int width = p;
MatrixMulTilingKernel << < dimGrid, dimBlock >> > (d_M, d_N, d_P, m, n, p);
}
cudaMemcpy(P, d_P, (m*p) * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
}
int main()
{
// old portion of code
// used when algorithms only worked with squared matrices
/*int sizes[] = { 1024, 2048, 4096, 8192, 16384};
for (int i = 0; i < 5; i++) {
std::cout << "Size: " << sizes[i] << std::endl;
float* A, * B;
init_mat(sizes[i], A);
init_mat(sizes[i], B);
float* output = new float[sizes[i] * sizes[i]];
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
hostMatrixMult(A, B, output, sizes[i],0);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapsed_time = 0;
cudaEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << 0 << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
init_mat(sizes[i], A);
init_mat(sizes[i], B);
output = new float[sizes[i] * sizes[i]];
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
hostMatrixMult(A, B, output, sizes[i], 1);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
elapsed_time = 0;
cudaEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << 1 << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
}
*/
int sizes_i[] = { 4096, 16384 };
int sizes_j[] = { 1024, 2048, 8192 };
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 3; j++) {
std::cout << "Size i: " << sizes_i[i] << std::endl;
std::cout << "Size j: " << sizes_j[j] << std::endl;
float* A, * B;
init_mat(sizes_i[i], sizes_j[j], A); // A = i x j
init_mat(sizes_j[j], sizes_i[i], B); // B = j x i
float* output = new float[sizes_i[i] * sizes_i[i]]; // C = i x i
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
hostMatrixMult(A, B, output, sizes_i[i], sizes_j[j], sizes_i[i], 0);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapsed_time = 0;
cudaEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << 0 << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
init_mat(sizes_i[i], sizes_j[j], A); // A = i x j
init_mat(sizes_j[j], sizes_i[i], B); // B = j x i
output = new float[sizes_i[i] * sizes_i[i]]; // C = i x i
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
hostMatrixMult(A, B, output, sizes_i[i], sizes_j[j], sizes_i[i], 1);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << 1 << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
}
}
return 0;
} |
58a6a289fdceec845316805c7e2a2400052dc18f.hip | // !!! This is a file automatically generated by hipify!!!
/*=========================================================================
*
* Copyright RTK Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#include "rtkCudaParkerShortScanImageFilter.hcu"
#include "rtkCudaUtilities.hcu"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math_constants.h>
texture<float, 1, hipReadModeElementType> tex_geometry; // geometry texture
inline __device__ float
TransformIndexToPhysicalPoint(int2 idx, float origin, float row, float column)
{
return origin + row * idx.x + column * idx.y;
}
inline __device__ float
ToUntiltedCoordinateAtIsocenter(float tiltedCoord, float sdd, float sid, float sx, float px, float sidu)
{
// sidu is the distance between the source and the virtual untilted detector
// l is the coordinate on the virtual detector parallel to the real detector
// and passing at the isocenter
const float l = (tiltedCoord + px - sx) * sid / sdd + sx;
// a is the angle between the virtual detector and the real detector
const float cosa = sx / sidu;
// the following relation refers to a note by R. Clackdoyle, titled
// "Sampling a tilted detector"
return l * sid / (sidu - l * cosa);
}
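// The piecewise weight computed in the kernel below follows the classic
// short-scan weights of D. L. Parker, "Optimal short scan convolution
// reconstruction for fan beam CT", Med. Phys. 9(2), 1982, apparently scaled by
// 2 to compensate for the scan range [0, pi + 2*delta] replacing a full 2*pi:
//   w = 2 * sin^2( (pi/4) * beta / (delta - alpha) )                   if beta <= 2*(delta - alpha)
//   w = 2                                                              if beta <= pi - 2*alpha
//   w = 2 * sin^2( (pi/4) * (pi + 2*delta - beta) / (delta + alpha) )  if beta <= pi + 2*delta
//   w = 0                                                              otherwise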
__global__ void
kernel_parker_weight(int2 proj_idx,
int3 proj_size,
int2 proj_size_buf_in,
int2 proj_size_buf_out,
float * dev_proj_in,
float * dev_proj_out,
float delta,
float firstAngle,
float proj_orig, // projection origin
float proj_row, // projection row direction & spacing
float proj_col // projection col direction & spacing
)
{
// compute projection index (== thread index)
int3 pIdx;
pIdx.x = blockIdx.x * blockDim.x + threadIdx.x;
pIdx.y = blockIdx.y * blockDim.y + threadIdx.y;
pIdx.z = blockIdx.z * blockDim.z + threadIdx.z;
long int pIdx_comp_in = pIdx.x + (pIdx.y + pIdx.z * proj_size_buf_in.y) * (proj_size_buf_in.x);
long int pIdx_comp_out = pIdx.x + (pIdx.y + pIdx.z * proj_size_buf_out.y) * (proj_size_buf_out.x);
// check if outside of projection grid
if (pIdx.x >= proj_size.x || pIdx.y >= proj_size.y || pIdx.z >= proj_size.z)
return;
float sdd = tex1Dfetch(tex_geometry, pIdx.z * 5 + 0);
float sx = tex1Dfetch(tex_geometry, pIdx.z * 5 + 1);
float px = tex1Dfetch(tex_geometry, pIdx.z * 5 + 2);
float sid = tex1Dfetch(tex_geometry, pIdx.z * 5 + 3);
// convert actual index to point
float pPoint =
TransformIndexToPhysicalPoint(make_int2(pIdx.x + proj_idx.x, pIdx.y + proj_idx.y), proj_orig, proj_row, proj_col);
// alpha projection angle
float hyp = sqrtf(sid * sid + sx * sx); // to untilted situation
float invsid = 1.f / hyp;
float l = ToUntiltedCoordinateAtIsocenter(pPoint, sdd, sid, sx, px, hyp);
float alpha = atan(-1 * l * invsid);
// beta projection angle: Parker's article assumes that the scan starts at 0
float beta = tex1Dfetch(tex_geometry, pIdx.z * 5 + 4);
beta -= firstAngle;
if (beta < 0)
beta += (2.f * CUDART_PI_F);
// compute weight
float weight = 0.;
if (beta <= (2 * delta - 2 * alpha))
weight = 2.f * powf(sinf((CUDART_PI_F * beta) / (4 * (delta - alpha))), 2.f);
else if (beta <= (CUDART_PI_F - 2 * alpha))
weight = 2.f;
else if (beta <= (CUDART_PI_F + 2 * delta))
weight = 2.f * powf(sinf((CUDART_PI_F * (CUDART_PI_F + 2 * delta - beta)) / (4 * (delta + alpha))), 2.f);
// compute output by multiplying with weight
dev_proj_out[pIdx_comp_out] = dev_proj_in[pIdx_comp_in] * weight;
}
void
CUDA_parker_weight(int proj_idx[2],
int proj_dim[3],
int proj_dim_buf_in[2],
int proj_dim_buf_out[2],
float * dev_proj_in,
float * dev_proj_out,
float * geometries,
float delta,
float firstAngle,
float proj_orig,
float proj_row,
float proj_col)
{
// copy geometry matrix to device, bind the matrix to the texture
float * dev_geom;
hipMalloc((void **)&dev_geom, proj_dim[2] * 5 * sizeof(float));
CUDA_CHECK_ERROR;
hipMemcpy(dev_geom, geometries, proj_dim[2] * 5 * sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK_ERROR;
hipBindTexture(0, tex_geometry, dev_geom, proj_dim[2] * 5 * sizeof(float));
CUDA_CHECK_ERROR;
// Thread Block Dimensions
int tBlock_x = 16;
int tBlock_y = 16;
int tBlock_z = 2;
// Each element in the volume (each voxel) gets 1 thread
unsigned int blocksInX = (proj_dim[0] - 1) / tBlock_x + 1;
unsigned int blocksInY = (proj_dim[1] - 1) / tBlock_y + 1;
unsigned int blocksInZ = (proj_dim[2] - 1) / tBlock_z + 1;
dim3 dimGrid = dim3(blocksInX, blocksInY, blocksInZ);
dim3 dimBlock = dim3(tBlock_x, tBlock_y, tBlock_z);
hipLaunchKernelGGL(( kernel_parker_weight), dim3(dimGrid), dim3(dimBlock), 0, 0, make_int2(proj_idx[0], proj_idx[1]),
make_int3(proj_dim[0], proj_dim[1], proj_dim[2]),
make_int2(proj_dim_buf_in[0], proj_dim_buf_in[1]),
make_int2(proj_dim_buf_out[0], proj_dim_buf_out[1]),
dev_proj_in,
dev_proj_out,
delta,
firstAngle,
proj_orig,
proj_row,
proj_col);
// Unbind matrix texture
hipUnbindTexture(tex_geometry);
CUDA_CHECK_ERROR;
hipFree(dev_geom);
CUDA_CHECK_ERROR;
}
| 58a6a289fdceec845316805c7e2a2400052dc18f.cu | /*=========================================================================
*
* Copyright RTK Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#include "rtkCudaParkerShortScanImageFilter.hcu"
#include "rtkCudaUtilities.hcu"
#include <cuda.h>
#include <cuda_runtime.h>
#include <math_constants.h>
texture<float, 1, cudaReadModeElementType> tex_geometry; // geometry texture
inline __device__ float
TransformIndexToPhysicalPoint(int2 idx, float origin, float row, float column)
{
return origin + row * idx.x + column * idx.y;
}
inline __device__ float
ToUntiltedCoordinateAtIsocenter(float tiltedCoord, float sdd, float sid, float sx, float px, float sidu)
{
// sidu is the distance between the source and the virtual untilted detector
// l is the coordinate on the virtual detector parallel to the real detector
// and passing at the isocenter
const float l = (tiltedCoord + px - sx) * sid / sdd + sx;
// a is the angle between the virtual detector and the real detector
const float cosa = sx / sidu;
// the following relation refers to a note by R. Clackdoyle, titled
// "Sampling a tilted detector"
return l * sid / (sidu - l * cosa);
}
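// The piecewise weight computed in the kernel below follows the classic
// short-scan weights of D. L. Parker, "Optimal short scan convolution
// reconstruction for fan beam CT", Med. Phys. 9(2), 1982, apparently scaled by
// 2 to compensate for the scan range [0, pi + 2*delta] replacing a full 2*pi:
//   w = 2 * sin^2( (pi/4) * beta / (delta - alpha) )                   if beta <= 2*(delta - alpha)
//   w = 2                                                              if beta <= pi - 2*alpha
//   w = 2 * sin^2( (pi/4) * (pi + 2*delta - beta) / (delta + alpha) )  if beta <= pi + 2*delta
//   w = 0                                                              otherwise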
__global__ void
kernel_parker_weight(int2 proj_idx,
int3 proj_size,
int2 proj_size_buf_in,
int2 proj_size_buf_out,
float * dev_proj_in,
float * dev_proj_out,
float delta,
float firstAngle,
float proj_orig, // projection origin
float proj_row, // projection row direction & spacing
float proj_col // projection col direction & spacing
)
{
// compute projection index (== thread index)
int3 pIdx;
pIdx.x = blockIdx.x * blockDim.x + threadIdx.x;
pIdx.y = blockIdx.y * blockDim.y + threadIdx.y;
pIdx.z = blockIdx.z * blockDim.z + threadIdx.z;
long int pIdx_comp_in = pIdx.x + (pIdx.y + pIdx.z * proj_size_buf_in.y) * (proj_size_buf_in.x);
long int pIdx_comp_out = pIdx.x + (pIdx.y + pIdx.z * proj_size_buf_out.y) * (proj_size_buf_out.x);
// check if outside of projection grid
if (pIdx.x >= proj_size.x || pIdx.y >= proj_size.y || pIdx.z >= proj_size.z)
return;
float sdd = tex1Dfetch(tex_geometry, pIdx.z * 5 + 0);
float sx = tex1Dfetch(tex_geometry, pIdx.z * 5 + 1);
float px = tex1Dfetch(tex_geometry, pIdx.z * 5 + 2);
float sid = tex1Dfetch(tex_geometry, pIdx.z * 5 + 3);
// convert actual index to point
float pPoint =
TransformIndexToPhysicalPoint(make_int2(pIdx.x + proj_idx.x, pIdx.y + proj_idx.y), proj_orig, proj_row, proj_col);
// alpha projection angle
float hyp = sqrtf(sid * sid + sx * sx); // to untilted situation
float invsid = 1.f / hyp;
float l = ToUntiltedCoordinateAtIsocenter(pPoint, sdd, sid, sx, px, hyp);
float alpha = atan(-1 * l * invsid);
// beta projection angle: Parker's article assumes that the scan starts at 0
float beta = tex1Dfetch(tex_geometry, pIdx.z * 5 + 4);
beta -= firstAngle;
if (beta < 0)
beta += (2.f * CUDART_PI_F);
// compute weight
float weight = 0.;
if (beta <= (2 * delta - 2 * alpha))
weight = 2.f * powf(sinf((CUDART_PI_F * beta) / (4 * (delta - alpha))), 2.f);
else if (beta <= (CUDART_PI_F - 2 * alpha))
weight = 2.f;
else if (beta <= (CUDART_PI_F + 2 * delta))
weight = 2.f * powf(sinf((CUDART_PI_F * (CUDART_PI_F + 2 * delta - beta)) / (4 * (delta + alpha))), 2.f);
// compute outpout by multiplying with weight
dev_proj_out[pIdx_comp_out] = dev_proj_in[pIdx_comp_in] * weight;
}
void
CUDA_parker_weight(int proj_idx[2],
int proj_dim[3],
int proj_dim_buf_in[2],
int proj_dim_buf_out[2],
float * dev_proj_in,
float * dev_proj_out,
float * geometries,
float delta,
float firstAngle,
float proj_orig,
float proj_row,
float proj_col)
{
// copy geometry matrix to device, bind the matrix to the texture
float * dev_geom;
cudaMalloc((void **)&dev_geom, proj_dim[2] * 5 * sizeof(float));
CUDA_CHECK_ERROR;
cudaMemcpy(dev_geom, geometries, proj_dim[2] * 5 * sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK_ERROR;
cudaBindTexture(0, tex_geometry, dev_geom, proj_dim[2] * 5 * sizeof(float));
CUDA_CHECK_ERROR;
// Thread Block Dimensions
int tBlock_x = 16;
int tBlock_y = 16;
int tBlock_z = 2;
// Each element in the volume (each voxel) gets 1 thread
unsigned int blocksInX = (proj_dim[0] - 1) / tBlock_x + 1;
unsigned int blocksInY = (proj_dim[1] - 1) / tBlock_y + 1;
unsigned int blocksInZ = (proj_dim[2] - 1) / tBlock_z + 1;
dim3 dimGrid = dim3(blocksInX, blocksInY, blocksInZ);
dim3 dimBlock = dim3(tBlock_x, tBlock_y, tBlock_z);
kernel_parker_weight<<<dimGrid, dimBlock>>>(make_int2(proj_idx[0], proj_idx[1]),
make_int3(proj_dim[0], proj_dim[1], proj_dim[2]),
make_int2(proj_dim_buf_in[0], proj_dim_buf_in[1]),
make_int2(proj_dim_buf_out[0], proj_dim_buf_out[1]),
dev_proj_in,
dev_proj_out,
delta,
firstAngle,
proj_orig,
proj_row,
proj_col);
// Unbind matrix texture
cudaUnbindTexture(tex_geometry);
CUDA_CHECK_ERROR;
cudaFree(dev_geom);
CUDA_CHECK_ERROR;
}
|
e54e9ca38ea9a61233a243a86928111db8a94e66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpumatrix.cuh"
#include "cuda_common.cuh"
#include "cuda_common_api.h"
template<typename T>
__global__ void kTranspose(T * id, T * od, const int width, const int height)
{
unsigned int tx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int ty = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int ltidx = threadIdx.x;
unsigned int ltidy = threadIdx.y;
unsigned int inputIdx = ty * width + tx;
// tile staged through shared memory; the local x/y swap on the read side
// assumes a square thread block (BlockXTransp == BlockYTransp)
__shared__ T smem[BlockYTransp][BlockXTransp];
// every thread of the block must reach __syncthreads(): out-of-range threads
// skip the load/store instead of returning early, which would be undefined
// behaviour for matrix sizes that are not multiples of the block size
if(tx < width && ty < height)
smem[ltidy][ltidx] = id[inputIdx];
__syncthreads();
tx = threadIdx.y + blockIdx.x * blockDim.x;
ty = threadIdx.x + blockIdx.y * blockDim.y;
unsigned int outputIdx = tx * height + ty;
if(tx < width && ty < height)
od[outputIdx] = smem[ltidx][ltidy];
}
| e54e9ca38ea9a61233a243a86928111db8a94e66.cu | #include "gpumatrix.cuh"
#include "cuda_common.cuh"
#include "cuda_common_api.h"
template<typename T>
__global__ void kTranspose(T * id, T * od, const int width, const int height)
{
unsigned int tx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int ty = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int ltidx = threadIdx.x;
unsigned int ltidy = threadIdx.y;
unsigned int inputIdx = ty * width + tx;
// tile staged through shared memory; the local x/y swap on the read side
// assumes a square thread block (BlockXTransp == BlockYTransp)
__shared__ T smem[BlockYTransp][BlockXTransp];
// every thread of the block must reach __syncthreads(): out-of-range threads
// skip the load/store instead of returning early, which would be undefined
// behaviour for matrix sizes that are not multiples of the block size
if(tx < width && ty < height)
smem[ltidy][ltidx] = id[inputIdx];
__syncthreads();
tx = threadIdx.y + blockIdx.x * blockDim.x;
ty = threadIdx.x + blockIdx.y * blockDim.y;
unsigned int outputIdx = tx * height + ty;
if(tx < width && ty < height)
od[outputIdx] = smem[ltidx][ltidy];
}
|
69df8802ee2b7fea8e436c0481949743b46b2bde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/tile/tile.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/tile/tile.cuh"
#include <stdint.h>
#include <functional>
#include <numeric>
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
namespace tile {
template <typename T>
__global__ void forward_kernel_1d(
const T* src, T* dst, uint32_t sshape, uint32_t dshape, uint32_t tshape) {
uint32_t di = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t si = di % sshape;
if (di < dshape) {
dst[di] = src[si];
}
}
template <typename T>
void forward_proxy_1d(
const T* src, T* dst, size_t sshape, size_t dshape, size_t tshape,
hipStream_t stream) {
size_t NR_BLOCKS = DIVUP(dshape, NR_THREADS);
hipLaunchKernelGGL(( forward_kernel_1d), dim3(NR_BLOCKS), dim3(NR_THREADS), 0, stream,
src, dst, sshape, dshape, tshape);
}
template <typename T>
__global__ void forward_kernel_2d(
const T* src, T* dst, uint32_t sshape0, uint32_t sshape1, uint32_t dshape0,
uint32_t dshape1, uint32_t tshape0, uint32_t tshape1) {
uint32_t dix = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t diy = threadIdx.y + blockIdx.y * blockDim.y;
uint32_t six = dix % sshape0;
uint32_t siy = diy % sshape1;
uint32_t diz = diy * dshape0 + dix;
uint32_t siz = siy * sshape0 + six;
if (dix < dshape0 && diy < dshape1) {
dst[diz] = src[siz];
}
}
template <typename T>
void forward_proxy_2d(
const T* src, T* dst, size_t sshape0, size_t sshape1, size_t dshape0,
size_t dshape1, size_t tshape0, size_t tshape1, hipStream_t stream) {
dim3 threads(NR_THREADS_X, NR_THREADS_Y);
dim3 blocks(DIVUP(dshape0, threads.x), DIVUP(dshape1, threads.y));
hipLaunchKernelGGL(( forward_kernel_2d), dim3(blocks), dim3(threads), 0, stream,
src, dst, sshape0, sshape1, dshape0, dshape1, tshape0, tshape1);
}
template <typename T, uint32_t ndim>
__global__ void forward_kernel_generic_tpl(
const T* __restrict__ src, T* __restrict__ dst, uint32_t n,
array_wrapper<uint32_t, ndim> sshape, array_wrapper<uint32_t, ndim> dshape,
array_wrapper<uint32_t, ndim> tshape) {
uint32_t tidx = threadIdx.x + blockIdx.x * blockDim.x;
if (tidx < n) {
uint32_t didx = tidx;
uint32_t sidx = 0;
uint32_t base = 1;
// calculate index
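// Tiling here is just index wrap-around: since every destination extent is an
// exact multiple of the corresponding source extent, "didx % sshape" on each
// axis gives the source coordinate directly, and the flattened source offset
// is rebuilt from those coordinates via the source strides.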
#pragma unroll
for (size_t i = ndim; i > 0; --i) {
size_t cidx = didx % sshape.data[i - 1];
sidx += cidx * base;
base *= sshape.data[i - 1];
didx /= dshape.data[i - 1];
}
dst[tidx] = src[sidx];
}
}
template <typename T, size_t ndim>
void forward_proxy_generic_tpl(
const T* src, T* dst, const size_t* sshape_, const size_t* dshape_,
const size_t* tshape_, hipStream_t stream) {
array_wrapper<uint32_t, ndim> sshape, dshape, tshape;
for (size_t i = 0; i < ndim; ++i)
sshape.data[i] = sshape_[i];
for (size_t i = 0; i < ndim; ++i)
dshape.data[i] = dshape_[i];
for (size_t i = 0; i < ndim; ++i)
tshape.data[i] = tshape_[i];
size_t n = std::accumulate(
dshape_, dshape_ + ndim, size_t(1), std::multiplies<size_t>());
size_t NR_BLOCKS = DIVUP(n, NR_THREADS);
hipLaunchKernelGGL(( forward_kernel_generic_tpl<T, ndim>)
, dim3(NR_BLOCKS), dim3(NR_THREADS), 0, stream, src, dst, n, sshape, dshape, tshape);
}
template <typename T>
void forward_proxy_generic(
const T* src, T* dst, size_t ndim, const size_t* sshape_, const size_t* dshape_,
const size_t* tshape_, hipStream_t stream) {
#define CASE(ndim) \
case ndim: \
forward_proxy_generic_tpl<T, ndim>( \
src, dst, sshape_, dshape_, tshape_, stream); \
break;
switch (ndim) {
CASE(2);
CASE(3);
CASE(4);
CASE(5);
CASE(6);
default:
megdnn_assert_internal(false);
}
#undef CASE
}
template <typename T>
void forward_proxy(
const T* src, T* dst, size_t ndim, const size_t* sshape_, const size_t* dshape_,
const size_t* tshape_, hipStream_t stream) {
if (ndim == 1) {
forward_proxy_1d<T>(src, dst, sshape_[0], dshape_[0], tshape_[0], stream);
} else if (ndim == 2 && dshape_[0] <= 65535 * NR_THREADS_Y) {
// CUDA can launch 65535 blocks along axis Y at most.
// Note that indices 1 and 0 are swapped; this is because in the kernel,
// index zero corresponds to axis X (which is the lowest adjacent axis),
// and index one corresponds to axis Y. However, outside the kernel,
// our representation is the opposite.
forward_proxy_2d<T>(
src, dst, sshape_[1], sshape_[0], dshape_[1], dshape_[0], tshape_[1],
tshape_[0], stream);
} else {
forward_proxy_generic<T>(src, dst, ndim, sshape_, dshape_, tshape_, stream);
}
after_kernel_launch();
}
#define INST(T) \
template void forward_proxy<T>( \
const T* src, T* dst, size_t ndim, const size_t* sshape_, \
const size_t* dshape_, const size_t* tshape_, hipStream_t stream);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace tile
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
| 69df8802ee2b7fea8e436c0481949743b46b2bde.cu | /**
* \file dnn/src/cuda/tile/tile.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/tile/tile.cuh"
#include <stdint.h>
#include <functional>
#include <numeric>
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
namespace tile {
template <typename T>
__global__ void forward_kernel_1d(
const T* src, T* dst, uint32_t sshape, uint32_t dshape, uint32_t tshape) {
uint32_t di = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t si = di % sshape;
if (di < dshape) {
dst[di] = src[si];
}
}
template <typename T>
void forward_proxy_1d(
const T* src, T* dst, size_t sshape, size_t dshape, size_t tshape,
cudaStream_t stream) {
size_t NR_BLOCKS = DIVUP(dshape, NR_THREADS);
forward_kernel_1d<<<NR_BLOCKS, NR_THREADS, 0, stream>>>(
src, dst, sshape, dshape, tshape);
}
template <typename T>
__global__ void forward_kernel_2d(
const T* src, T* dst, uint32_t sshape0, uint32_t sshape1, uint32_t dshape0,
uint32_t dshape1, uint32_t tshape0, uint32_t tshape1) {
uint32_t dix = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t diy = threadIdx.y + blockIdx.y * blockDim.y;
uint32_t six = dix % sshape0;
uint32_t siy = diy % sshape1;
uint32_t diz = diy * dshape0 + dix;
uint32_t siz = siy * sshape0 + six;
if (dix < dshape0 && diy < dshape1) {
dst[diz] = src[siz];
}
}
template <typename T>
void forward_proxy_2d(
const T* src, T* dst, size_t sshape0, size_t sshape1, size_t dshape0,
size_t dshape1, size_t tshape0, size_t tshape1, cudaStream_t stream) {
dim3 threads(NR_THREADS_X, NR_THREADS_Y);
dim3 blocks(DIVUP(dshape0, threads.x), DIVUP(dshape1, threads.y));
forward_kernel_2d<<<blocks, threads, 0, stream>>>(
src, dst, sshape0, sshape1, dshape0, dshape1, tshape0, tshape1);
}
template <typename T, uint32_t ndim>
__global__ void forward_kernel_generic_tpl(
const T* __restrict__ src, T* __restrict__ dst, uint32_t n,
array_wrapper<uint32_t, ndim> sshape, array_wrapper<uint32_t, ndim> dshape,
array_wrapper<uint32_t, ndim> tshape) {
uint32_t tidx = threadIdx.x + blockIdx.x * blockDim.x;
if (tidx < n) {
uint32_t didx = tidx;
uint32_t sidx = 0;
uint32_t base = 1;
// calculate index
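// Tiling here is just index wrap-around: since every destination extent is an
// exact multiple of the corresponding source extent, "didx % sshape" on each
// axis gives the source coordinate directly, and the flattened source offset
// is rebuilt from those coordinates via the source strides.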
#pragma unroll
for (size_t i = ndim; i > 0; --i) {
size_t cidx = didx % sshape.data[i - 1];
sidx += cidx * base;
base *= sshape.data[i - 1];
didx /= dshape.data[i - 1];
}
dst[tidx] = src[sidx];
}
}
template <typename T, size_t ndim>
void forward_proxy_generic_tpl(
const T* src, T* dst, const size_t* sshape_, const size_t* dshape_,
const size_t* tshape_, cudaStream_t stream) {
array_wrapper<uint32_t, ndim> sshape, dshape, tshape;
for (size_t i = 0; i < ndim; ++i)
sshape.data[i] = sshape_[i];
for (size_t i = 0; i < ndim; ++i)
dshape.data[i] = dshape_[i];
for (size_t i = 0; i < ndim; ++i)
tshape.data[i] = tshape_[i];
size_t n = std::accumulate(
dshape_, dshape_ + ndim, size_t(1), std::multiplies<size_t>());
size_t NR_BLOCKS = DIVUP(n, NR_THREADS);
forward_kernel_generic_tpl<T, ndim>
<<<NR_BLOCKS, NR_THREADS, 0, stream>>>(src, dst, n, sshape, dshape, tshape);
}
template <typename T>
void forward_proxy_generic(
const T* src, T* dst, size_t ndim, const size_t* sshape_, const size_t* dshape_,
const size_t* tshape_, cudaStream_t stream) {
#define CASE(ndim) \
case ndim: \
forward_proxy_generic_tpl<T, ndim>( \
src, dst, sshape_, dshape_, tshape_, stream); \
break;
switch (ndim) {
CASE(2);
CASE(3);
CASE(4);
CASE(5);
CASE(6);
default:
megdnn_assert_internal(false);
}
#undef CASE
}
template <typename T>
void forward_proxy(
const T* src, T* dst, size_t ndim, const size_t* sshape_, const size_t* dshape_,
const size_t* tshape_, cudaStream_t stream) {
if (ndim == 1) {
forward_proxy_1d<T>(src, dst, sshape_[0], dshape_[0], tshape_[0], stream);
} else if (ndim == 2 && dshape_[0] <= 65535 * NR_THREADS_Y) {
// CUDA can launch 65535 blocks along axis Y at most.
// Note that indices 1 and 0 are swapped; this is because in the kernel,
// index zero corresponds to axis X (which is the lowest adjacent axis),
// and index one corresponds to axis Y. However, outside the kernel,
// our representation is the opposite.
forward_proxy_2d<T>(
src, dst, sshape_[1], sshape_[0], dshape_[1], dshape_[0], tshape_[1],
tshape_[0], stream);
} else {
forward_proxy_generic<T>(src, dst, ndim, sshape_, dshape_, tshape_, stream);
}
after_kernel_launch();
}
#define INST(T) \
template void forward_proxy<T>( \
const T* src, T* dst, size_t ndim, const size_t* sshape_, \
const size_t* dshape_, const size_t* tshape_, cudaStream_t stream);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace tile
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
e9fca4c03c50125ae38a8538f1f4697118eba2cf.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <legacy/NativeOpExecutioner.h>
#include <legacy/NativeOps.h>
#include <hip/hip_runtime.h>
#include <system/buffer.h>
#include <loops/transform_any.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
#include <loops/scalar.h>
#include <helpers/threshold.h>
#include <ops/specials_cuda.h>
#include <helpers/DebugHelper.h>
#include <execution/AffinityManager.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <helpers/CudaLaunchHelper.h>
#include <graph/GraphExecutioner.h>
#include <helpers/BlasHelper.h>
#include <graph/GraphHolder.h>
#include <ops/declarable/CustomOperations.h>
#include <helpers/PointersManager.h>
//#include <sys/time.h>
#include <hiprand/hiprand.h>
#include <graph/Status.h>
#include <helpers/DebugHelper.h>
using namespace sd;
#include <loops/special_kernels.h>
#include <performance/benchmarking/FullBenchmarkSuit.h>
#include <performance/benchmarking/LightBenchmarkSuit.h>
hipDeviceProp_t *deviceProperties;
hipFuncAttributes *funcAttributes = new hipFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __ND4J_EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
// this method just does type conversion in a fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
/*
 * This method returns the shared memory threshold value. The default overflow ratio is 0.3.
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
sd::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) {
auto scalarShapeInfo = shape::createScalarShapeInfo();
auto buff = sd::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
sd::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
sd::buffer::Buffer<Nd4jLong> *scalarDimension;
sd::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(hipStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong));
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = sd::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
sd::buffer::freeBuffer(&scalarShapeInfo);
sd::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
sd::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
hipStream_t streamRef;
public:
ScalarInfo(hipStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = sd::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
sd::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
sd::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the dZ pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
sd::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
void execPairwiseTransform( Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execPairwiseTransformBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execSummaryStatsScalar(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execBroadcastBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcastBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void execBroadcast(
Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcast(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
void execReduceFloat(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceFloatScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceSame(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSameScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceSame2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
const Nd4jLong* zShapeInfoH = hZShapeInfo;
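// if z still carries unit dimensions (e.g. from a keepDims reduction), squeeze them
// out so the kernel sees a rank matching rank(x) - dimensionLength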
if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary());
}
std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSame(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceLong2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
const Nd4jLong* zShapeInfoH = hZShapeInfo;
if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary());
}
std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceLong(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceLong(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::INT64)
throw datatype_exception::build("execReduceLong wrong Z data type", sd::DataType::INT64, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
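// launchDims packs (gridSize, blockSize, sharedMemBytes); the third component is
// consumed as the shared-memory byte count by the launch helpers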
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction,
::execReduceScalar(launchDims, stream, opNum,
dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
extraParams,
dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceBool2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
const Nd4jLong* zShapeInfoH = hZShapeInfo;
if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary());
}
std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::BOOL)
throw std::runtime_error("execReduceBool requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction,
::execReduceScalar(launchDims, stream, opNum,
dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
extraParams,
dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
 * Executes an index-reduce op (e.g. argmax-style ops) along the given dimensions.
 *
 * @param opNum op number
 * @param dX input buffer
 * @param dXShapeInfo input shape info
 * @param extraParams op-specific extra parameters
 * @param dZ output buffer
 * @param dZShapeInfo output shape info
 * @param dimension dimensions to reduce along
 * @param dimensionLength number of dimensions
 */
////////////////////////////////////////////////////////////////////////
void execIndexReduce(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduce(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
(int *) dbDimension->special(), dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
 * Executes a float-typed reduce op along the given dimensions.
 *
 * @param opNum op number
 * @param dX input buffer
 * @param dXShapeInfo input shape info
 * @param extraParams op-specific extra parameters
 * @param dZ output buffer
 * @param dZShapeInfo output shape info
 * @param dimension dimensions to reduce along
 * @param dimensionLength number of dimensions
 */
////////////////////////////////////////////////////////////////////////
void execReduceFloat2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
const Nd4jLong* zShapeInfoH = hZShapeInfo;
if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary());
}
std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceFloat(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
 * Executes an index-reduce op collapsed to a single scalar result.
 *
 * @param opNum op number
 * @param dX input buffer
 * @param dXShapeInfo input shape info
 * @param extraParams op-specific extra parameters
 */
////////////////////////////////////////////////////////////////////////
void execIndexReduceScalar(
Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo){
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduceScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformSame(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformSame(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformBool(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformAny(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto streamSpecial = reinterpret_cast<hipStream_t &>(extraPointers[4]);
LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3],
reinterpret_cast<int *>(extraPointers[6]));
NativeOpExecutioner::execTransformAny(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
nullptr, nullptr);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformStrict(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformStrict(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformFloat(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformFloat(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void checkP2P() {
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
hipSetDevice(dX);
hipDeviceCanAccessPeer(&canAccess, dX , dY);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
hipSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
hipSetDevice(dX);
hipDeviceCanAccessPeer(&canAccess, dX , dY);
if (canAccess) {
if (enable) {
hipDeviceEnablePeerAccess(dY, 0);
} else {
hipDeviceDisablePeerAccess(dY);
}
} else {
if (sd::Environment::getInstance().isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
}
}
}
hipSetDevice(curDevice);
}
allowedP2P = enable;
hipSetDevice(curDevice);
}
bool isP2PAvailable() {
return supportedP2P;
}
void initializeDevicesAndFunctions() {
try {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
deviceProperties = new hipDeviceProp_t[devCnt];
for (int i = 0; i < devCnt; i++) {
hipSetDevice(i);
hipGetDeviceProperties(&deviceProperties[i], i);
hipDeviceSetLimit(hipLimitStackSize, 4096);
}
hipSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void initializeFunctions(Nd4jPointer *functions) {
sd::BlasHelper::getInstance().initializeDeviceFunctions(functions);
/*
hipblasSgemv = (CublasSgemv)functions[0];
hipblasDgemv = (CublasDgemv)functions[1];
hipblasHgemm = (CublasHgemm)functions[2];
hipblasSgemm = (CublasSgemm)functions[3];
hipblasDgemm = (CublasDgemm)functions[4];
cublasSgemmEx = (CublasSgemmEx)functions[5];
hipblasHgemmBatched = (CublasHgemmBatched)functions[6];
hipblasSgemmBatched = (CublasSgemmBatched)functions[7];
hipblasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
 * This method acquires a pinned memory chunk of requested size on the host side
 *
 * @param memorySize memory size, in bytes
 * @param flags optional allocation flags
 * @return pointer to the allocated chunk
 */
Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// hipHostMallocMapped |hipHostMallocPortable
auto res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8, hipHostMallocDefault);
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostMalloc failed");
}
return reinterpret_cast<int8_t*>(pointer);
}
/**
 * This method acquires a memory chunk of requested size on the specified device
 *
 * @param memorySize memory size, in bytes
 * @param deviceId target device id. For CUDA that's just an int, for OpenCL that's a device_id, etc
 * @param flags optional allocation flags
 */
Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) {
Nd4jPointer pointer;
auto res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8);
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMalloc failed");
}
return reinterpret_cast<int8_t*>(pointer);
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int freeHost(Nd4jPointer pointer) {
auto res = hipHostFree(reinterpret_cast<void *>(pointer));
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostFree failed");
}
return 1L;
}
/**
 * This method releases previously allocated memory space on device
 *
 * @param pointer pointer that'll be freed
 * @param deviceId device id the chunk was allocated on
 */
int freeDevice(Nd4jPointer pointer, int deviceId) {
auto res = hipFree(reinterpret_cast<void *>(pointer));
// error code 1 is intentionally tolerated here, in addition to success (0)
if (res != 0 && res != 1) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipFree failed");
}
return res == 0 ? 1L : 0L;
}
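/*
 * Minimal host/device round-trip sketch using the allocators above (illustrative only;
 * `stream` stands for a valid hipStream_t* passed as Nd4jPointer, e.g. extraPointers[1]):
 *
 *   Nd4jPointer host = mallocHost(1024, 0);        // pinned host allocation
 *   Nd4jPointer dev  = mallocDevice(1024, 0, 0);   // allocation on device 0
 *   memcpyAsync(dev, host, 1024, 1, stream);       // flag 1 == host -> device
 *   streamSynchronize(stream);
 *   freeDevice(dev, 0);
 *   freeHost(host);
 */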
Nd4jPointer createContext() {
return 0L;
}
Nd4jPointer createStream() {
auto stream = new hipStream_t();
auto dZ = hipStreamCreate(stream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamCreate failed");
}
return stream;
}
Nd4jPointer createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(hipEvent_t));
auto dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventCreateWithFlags failed");
}
return nativeEvent;
}
int registerEvent(Nd4jPointer event, Nd4jPointer stream) {
auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
auto pStream = reinterpret_cast<hipStream_t *>(stream);
auto dZ = hipEventRecord(*pEvent, *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventRecord failed");
}
return 1;
}
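/*
 * Event usage sketch (illustrative): record an event on a stream, then block the host
 * until all work queued before the record point has completed.
 *
 *   Nd4jPointer e = createEvent();
 *   registerEvent(e, stream);   // hipEventRecord on the given stream
 *   eventSynchronize(e);        // host-side wait
 *   destroyEvent(e);
 */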
int setDevice(int deviceId) {
AffinityManager::setCurrentDevice(deviceId);
return 1;
}
Nd4jLong getDeviceFreeMemoryDefault() {
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
return (Nd4jLong) memFree;
}
Nd4jLong getDeviceFreeMemory(int device) {
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong getDeviceTotalMemory(int device) {
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipMemcpyKind kind;
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
default: {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY");
return 0;
}
}
auto dZ = hipMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
if (dZ != 0) {
printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpy failed");
return 0;
}
return 1;
}
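// Direction flags shared by memcpySync/memcpyAsync (and memcpyConstantAsync below):
//   0 = host -> host, 1 = host -> device, 2 = device -> host, 3 = device -> device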
int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto pStream = reinterpret_cast<hipStream_t *>(reserved);
hipMemcpyKind kind;
//sd::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed");
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
default: {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY");
return 0;
}
}
auto dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
//auto dZ = hipMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
if (dZ != 0) {
printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyAsync failed");
return 0;
}
return 1;
}
int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto dZ = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemset failed");
}
return 1;
}
int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto pStream = reinterpret_cast<hipStream_t *>(reserved);
auto dZ = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemsetAsync failed");
}
return 1;
}
int destroyEvent(Nd4jPointer event) {
auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
auto dZ = hipEventDestroy(*pEvent);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventDestroy failed");
}
return 1;
}
int streamSynchronize(Nd4jPointer stream) {
auto pStream = reinterpret_cast<hipStream_t *>(stream);
auto dZ = hipStreamSynchronize(*pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamSynchronize failed");
}
return 1L;
}
int eventSynchronize(Nd4jPointer event) {
auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
auto dZ = hipEventSynchronize(*pEvent);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventSynchronize failed");
}
return 1L;
}
int getAvailableDevices() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
return devCnt;
}
void enableDebugMode(bool reallyEnable) {
sd::Environment::getInstance().setDebug(reallyEnable);
}
void setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int ompGetMaxThreads() {
return maxThreads;
}
int ompGetNumThreads() {
return maxThreads;
}
void setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void enableVerboseMode(bool reallyEnable) {
sd::Environment::getInstance().setVerbose(reallyEnable);
}
int getDeviceMajor(int device) {
return deviceProperties[device].major;
}
int getDeviceMinor(int device) {
return deviceProperties[device].minor;
}
const char * getDeviceName(int device) {
return deviceProperties[device].name;
}
void specialConcat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
void *dZ,
Nd4jLong const* dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
try {
BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), sd::SpecialMethods,
::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo),
LIBND4J_TYPES);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
 * This method builds a TAD pack (shape info plus offsets) for the given dimensions
 */
sd::TadPack* tadOnlyShapeInfo(Nd4jLong const* dXShapeInfo, int *dimension, int dimensionLength) {
try {
auto pack = new TadPack();
*pack = sd::ConstantTadHelper::getInstance().tadForDimensions(dXShapeInfo, dimension, dimensionLength);
return pack;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
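// A TadPack describes the set of "tensors along dimension" (TADs): the sub-arrays
// obtained by fixing all dimensions except the requested ones. It carries one shared
// shape info plus a per-TAD offset array, on both host (primary) and device (special).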
Nd4jLong const* getPrimaryShapeInfo(sd::TadPack* pack) {
return pack->primaryShapeInfo();
}
Nd4jLong const* getPrimaryOffsets(sd::TadPack* pack) {
return pack->primaryOffsets();
}
Nd4jLong const* getSpecialShapeInfo(sd::TadPack* pack) {
return pack->specialShapeInfo();
}
Nd4jLong const* getSpecialOffsets(sd::TadPack* pack) {
return pack->specialOffsets();
}
Nd4jLong getNumberOfTads(sd::TadPack* pack) {
return pack->numberOfTads();
}
int getShapeInfoLength(sd::TadPack* pack) {
return pack->shapeInfoLength();
}
int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(reserved);
hipMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
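// note: `dst` here is a byte offset into the deviceConstantMemory symbol, not a raw
// device pointer; see the hipMemcpyToSymbolAsync call below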
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
    kind = hipMemcpyDeviceToHost;
}
    break;
case 3: {
    kind = hipMemcpyDeviceToDevice;
}
    break;
default: {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
    return 0;
}
}
auto dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyToSymbolAsync failed");
}
return 1;
}
Nd4jPointer getConstantSpace() {
Nd4jPointer dConstAddr;
hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipGetSymbolAddress failed");
}
return dConstAddr;
}
void pullRows(Nd4jPointer *extraPointers,
OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* zShapeInfo, Nd4jLong const* dZShapeInfo,
Nd4jLong n,
Nd4jLong *indexes,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets,
Nd4jLong const* zTadShapeInfo,
Nd4jLong const* zTadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
dim3 launchDims(64, 256, 1024);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric,
(launchDims, stream, dbX->special(), dbZ->special(), n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets),
LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void average(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong const* xShapeInfo,
Nd4jPointer *dx, Nd4jLong const* dXShapeInfo,
void *z, Nd4jLong const* zShapeInfo,
void *dz, Nd4jLong const* dzShapeInfo,
int n,
Nd4jLong length,
bool propagate) {
try {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("averageFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(256, 256, 4096);
BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate),
LIBND4J_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void accumulate(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong const* xShapeInfo,
Nd4jPointer *dx, Nd4jLong const* dXShapeInfo,
void *z, Nd4jLong const* zShapeInfo,
void *dz, Nd4jLong const* dzShapeInfo,
int n,
Nd4jLong length) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("accumulateFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(n, 256, 16384);
BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length),
LIBND4J_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void shuffle(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jPointer *xShapeInfo,
Nd4jPointer *dx, Nd4jPointer *dXShapeInfo,
Nd4jPointer *z, Nd4jPointer *zShapeInfo,
Nd4jPointer *dz, Nd4jPointer *dZShapeInfo,
int N,
int *shuffleMap,
Nd4jPointer *tadShapeInfo,
Nd4jPointer *tadOffsets) {
try {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]);
auto dX = reinterpret_cast<void **>(dx);
auto dZ = reinterpret_cast<void **>(dz);
auto xShape = reinterpret_cast<Nd4jLong**>(xShapeInfo);
auto dxShape = reinterpret_cast<Nd4jLong**>(dXShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
auto xType = sd::ArrayOptions::dataType(xShape[0]);
dim3 launchDims(256, 512, 8192);
BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric,
(launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
bool isExperimentalEnabled() {
return sd::Environment::getInstance().isExperimentalBuild();
}
void setOmpMinThreads(int threads) {
minThreads = sd::math::nd4j_max<int>(32, threads);
minThreads = sd::math::nd4j_min<int>(maxThreads, minThreads);
}
int getDevice() {
return sd::AffinityManager::currentDeviceId();
}
void setElementThreshold(int num) {
// this is a no-op for CUDA
}
void setTADThreshold(int num) {
// this is a no-op for CUDA
}
////////////////////////////////////////////////////////////////////////
void execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStats(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execSummaryStatsTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
bool biasCorrected,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbDimension});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStats(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
reinterpret_cast<int *>(dbDimension->special()), dimensionLength,
tadShapeInfo, tadOffsets,
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbDimension});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3Tad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* yTadOnlyShapeInfo, Nd4jLong const* yTadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
auto tadLength = shape::length(tadPack.primaryShapeInfo());
auto yLength = shape::length(hYShapeInfo);
auto xLength = shape::length(hXShapeInfo);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
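// when each TAD spans the whole of y (or x), the TAD reduction degenerates into a
// plain all-pairs reduce3, so dispatch to the non-TAD implementation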
if (tadLength == yLength || tadLength == xLength) {
// nd4j_printf("== way\n","");
NativeOpExecutioner::execReduce3(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
} else
NativeOpExecutioner::execReduce3TAD(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3Scalar(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3Scalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarBoolTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dbScalars->primary(), hScalarShapeInfo, dbScalars->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(),
dimension, dimensionLength,
tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalar(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != sd::DataType::BOOL && !isExperimentalEnabled())
throw sd::datatype_exception::build("execScalar both operands must have same data type", xType, yType);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->special(), extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void execAggregate(Nd4jPointer *extraPointers,
int opNum,
void **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
void *realArguments,
int numRealArguments,
sd::DataType dtype) {
}
void batchExecutor(Nd4jPointer *extraPointers,
int numAggregates,
int opNum,
int maxArgs,
int maxShapes,
int maxIntArrays,
int maxIntArraySize,
int maxIdx,
int maxReals,
void *ptrToArguments,
sd::DataType dtype) {
}
void execAggregateBatch(Nd4jPointer *extraPointers,
int numAggregates, int opNum,
int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize,
int maxIdx, int maxReals,
void *ptrToArguments, sd::DataType dtype) {
}
////////////////////////////////////////////////////////////////////////
void execRandom(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer stateHost,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execRandom3(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
// we don't synchronize at random initialization, it's safe to stay unsynchronized here
// hipStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new sd::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate sequence in the host memory
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
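// Typical lifecycle of the random buffer (hypothetical caller-side sketch; the concrete
// extraPointers layout is whatever the JVM side normally passes in):
//
//     Nd4jPointer rng = initRandom(extraPointers, 119, 100000, ptrToBuffer);
//     // ... run random ops, then re-randomize the same buffer:
//     refreshBuffer(extraPointers, 123, rng);   // new seed + regenerated contents
//     reSeedBuffer(extraPointers, 131, rng);    // new seed, state-only update
//     destroyRandom(rng);
//
// Note: bufferSize is counted in 64-bit elements, hence the `bufferSize * 8`
// byte count in the hipMemcpyAsync above.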
void destroyRandom(Nd4jPointer ptrBuffer) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrBuffer);
// FIXME: this is bad, but we can't know in advance which stream(s) were using this generator in practice
hipDeviceSynchronize();
delete buffer;
}
void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
hipStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh buffer on the host side
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream);
}
void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
hipStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
int lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
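// For reference (hedged, based on the usual shape-info layout of rank, shape[rank],
// strides[rank] plus trailing type/ews/order fields): this works out to
// rank * 2 + 4 entries, e.g. a rank-3 shape buffer reports a length of 10.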
/**
 * Get a pointer for the given address
 *
 * @param address the address to convert
 * @return the pointer for the given address
 */
Nd4jPointer pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void tear(Nd4jPointer *extras,
OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo,
Nd4jPointer *targets,
Nd4jLong const* zShapeInfo,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({}, {dbX});
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]);
dim3 launchDims(512, 512, 512);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric,
(launchDims, stream, dbX->special(), dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
InteropDataBuffer::registerSpecialUse({}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
auto stream = reinterpret_cast<hipStream_t *>(extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = sd::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (sd::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = sd::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = sd::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
sd::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
sd::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
hipLaunchKernelGGL(( sd::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( sd::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
sd::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
} else {
sd::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
}
sd::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed");
}
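// Worked example (just to illustrate the recursion, numbers are hypothetical): for
// numElements = 2000 and blockSize = 512, each block scans numEltsPerBlock = 1024
// elements, so numBlocks = 2 and the last block holds numEltsLastBlock = 976
// elements. 976 is not a power of two, so np2LastBlock = 1 and the last block runs
// with floorPow2(976) = 512 threads. The per-block sums then form a 2-element array
// in g_scanBlockSums[level] that is scanned by the recursive call at level + 1 and
// added back into the output via sd::uniformAdd.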
////////////////////////////////////////////////////////////////////////
void execReduce3All(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParamsVals,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* xTadShapeInfo, Nd4jLong const* xOffsets,
Nd4jLong const* yTadShapeInfo, Nd4jLong const* yOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY, dbDimension});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3All(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParamsVals,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
reinterpret_cast<int *>(dbDimension->special()), dimensionLength,
xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sort(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
bool descending) {
try {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric,
(launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric,
(launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
sd::DebugHelper::checkErrorCode(stream, "sort(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
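// Sketch of the two paths above: for a power-of-two length like 8, the classic
// bitonic network runs with k = 2, 4, 8 and j = k/2 ... 1 inside each stage; for
// other lengths, the "arbitrary step" variant walks windows 2, 4, ... up to the
// next power of two >= xLength (the `max` computed in that branch) and lets the
// kernel handle the ragged tail.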
void sortByKey(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
bool descending) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto yLength = shape::length(yShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
auto yType = sd::ArrayOptions::dataType(yShapeInfo);
if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo))
return;
if (xLength != yLength)
throw std::runtime_error("sortByKey: keys and values must have the same size");
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortByValue(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
bool descending) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto yLength = shape::length(yShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
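        // note: the type lookups below are deliberately swapped relative to sortByKey —
        // the kernels are launched with (dy, dX) reversed, so we sort by the values
        // and drag the keys along.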
auto xType = sd::ArrayOptions::dataType(yShapeInfo);
auto yType = sd::ArrayOptions::dataType(xShapeInfo);
if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo))
return;
if (xLength != yLength)
throw std::runtime_error("sortByValue: keys and values must have the same size");
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTadByKey(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
int *dimension,
int dimensionLength,
bool descending) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
auto yType = sd::ArrayOptions::dataType(yShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
LIBND4J_TYPES, LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTadByValue(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
int *dimension,
int dimensionLength,
bool descending) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048);
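        // same deliberate swap as in sortByValue: the values (dy) act as sort keys here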
auto xType = sd::ArrayOptions::dataType(yShapeInfo);
auto yType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
LIBND4J_TYPES, LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTad(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets,
bool descending) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
        dim3 launchDims((int) tadPack.numberOfTads(), 512, 32768);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, oesTadGeneric,
(launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTad(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) {
throw std::runtime_error("sortCooIndices:: Not implemented yet");
}
Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
return nullptr;
}
void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
sd::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
try {
return sd::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getResultWrapperSize(sd::graph::ResultWrapper* ptr) {
return ptr->size();
}
Nd4jPointer getResultWrapperPointer(sd::graph::ResultWrapper* ptr) {
return ptr->pointer();
}
const char* getAllCustomOps() {
return sd::ops::OpRegistrator::getInstance().getAllCustomOperations();
}
sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) {
sd::graph::VariableSpace varSpace;
Context block(2, &varSpace);
sd::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numBArgs; e++)
block.getBArguments()->push_back(bArgs[e]);
for (int e = 0; e < numDArgs; e++)
block.getDArguments()->push_back((sd::DataType) dArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
// we shouldn't copy the buffer if it's an empty array
void *buffer_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
void *bufferD_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputShapes];
auto array = new sd::NDArray(buffer_, bufferD_, shape_);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.launchContext()->getWorkspace() != nullptr)
shapeList->detach();
return shapeList;
}
sd::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) {
try {
auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs,
iArgs, numIArgs, bArgs, numBArgs, dArgs, numDArgs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
Context block(1);
sd::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
sd::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
try {
auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getShapeListSize(sd::ShapeList* list) {
return list->size();
}
Nd4jLong const* getShape(sd::ShapeList* list, Nd4jLong i) {
return list->at(i);
}
static FORCEINLINE Nd4jStatus realExec(sd::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<sd::NDArray*> inputs(numInputs);
std::vector<sd::NDArray*> outputs(numOutputs);
std::vector<double> ttArgs(numTArgs);
std::vector<bool> bbArgs(numBArgs);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs];
inputs[e] = new sd::NDArray(buffer, bufferD, shape);
}
// if not inplace - transferring output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs];
// FIXME: revisit this.
bool canNullify = true;
for (int i = 0; i < numInputs; i++) {
void *ibuffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i];
if (ibuffer == buffer) {
canNullify = false;
break;
}
}
if (canNullify && buffer != nullptr)
memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)));
auto array = new sd::NDArray(buffer, bufferD, shape);
outputs[e] = array;
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
for (int e = 0; e < numBArgs; e++)
bbArgs[e] = bArgs[e];
// hypothetically at this point we have everything filled
auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, std::vector<sd::DataType>(), isInplace);
//auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
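// In short: realExec wraps the raw JVM buffers into temporary NDArrays, optionally
// zeroes output buffers that don't alias any input, runs the op, re-streamlines any
// output whose ordering no longer matches the shape the caller handed in, and then
// deletes the temporary wrappers.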
int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
try {
auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) {
try {
auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
auto context = reinterpret_cast<Context *>(opContext);
auto result = op->execute(context);
auto res = hipStreamSynchronize(*context->launchContext()->getCudaStream());
if (res != 0)
throw sd::cuda_exception::build("customOp execution failed", res);
for (auto v:context->fastpath_in()) {
if (!v->isEmpty())
v->syncToDevice();
}
for (auto v:context->fastpath_out()) {
if (!v->isEmpty())
v->syncToDevice();
}
return result;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
int registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
try {
auto graph = sd::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
sd::graph::GraphHolder::getInstance().registerGraph(graphId, graph);
return ND4J_STATUS_OK;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = sd::graph::GraphHolder::getInstance().pullGraph(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<sd::NDArray*> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new sd::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto dZ = sd::graph::GraphExecutioner::execute(graph, varSpace);
auto varSet = new sd::graph::VariablesSet(dZ);
if (dZ == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting variable ID/Index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
try {
return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getVariablesSetSize(sd::graph::VariablesSet* set) {
return set->size();
}
Nd4jStatus getVariablesSetStatus(sd::graph::VariablesSet* set) {
return set->status();
}
sd::graph::Variable* getVariable(sd::graph::VariablesSet* set, Nd4jLong i) {
return set->at(i);
}
int getVariableId(sd::graph::Variable* variable) {
return variable->id();
}
int getVariableIndex(sd::graph::Variable* variable) {
return variable->index();
}
const char* getVariableName(sd::graph::Variable* variable) {
return variable->getName()->c_str();
}
Nd4jLong const* getVariableShape(sd::graph::Variable* variable) {
return variable->getNDArray()->shapeInfo();
}
void* getVariableBuffer(sd::graph::Variable* variable) {
return variable->getNDArray()->buffer();
}
int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
try {
sd::graph::GraphHolder::getInstance().dropGraphAny(graphId);
return ND4J_STATUS_OK;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
void deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void deleteCharArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<char *>(pointer);
delete[] ptr;
}
void deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
void deleteVariablesSet(sd::graph::VariablesSet* pointer) {
delete pointer;
}
void deleteShapeList(Nd4jPointer shapeList) {
sd::ShapeList* list = reinterpret_cast<sd::ShapeList*>(shapeList);
//list->destroy();
delete list;
}
const char* getAllOperations() {
return sd::OpTracker::getInstance().exportOperations();
}
Nd4jPointer getGraphState(Nd4jLong id) {
return (Nd4jPointer) new sd::graph::GraphState(id);
}
void deleteGraphState(Nd4jPointer state) {
auto stateP = reinterpret_cast<sd::graph::GraphState*>(state);
delete stateP;
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, sd::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
* That's basically exec, with VariableSpace provided in GraphState:
* depending on operation (i.e. while of if), different logic executors could be used
*/
auto graph = state->graph();
auto varSpace = state->variableSpace();
// Node is dynamically created, and has nothing beyond it: only inputs and outputs
// this node has an id of 0, and its inputs are mapped right below
Node node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = inputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new sd::NDArray(buffer, shapeInfo, varSpace->launchContext());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
// nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto dZ = LogicExecutor::processNode(graph, &node);
if (dZ != Status::OK())
return dZ;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = outputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
NDArray array(buffer, shapeInfo, varSpace->launchContext());
// now we just put array to VarSpace to the same ID
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// at this point we should have the Graph and Node for the current op
return Status::OK();
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
try {
return execCustomOpWithScope(extraPointers, reinterpret_cast<sd::graph::GraphState *>(state), opHash, scopes,
numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
numOutputs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
void deleteResultWrapper(Nd4jPointer ptr) {
// nothing fancy here, just delete the wrapper
auto p = reinterpret_cast<sd::graph::ResultWrapper *>(ptr);
delete p;
}
int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong const* dXShapeInfo, int N, float threshold) {
throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
* TypeDef:
 *     void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ);
*/
void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) {
try {
auto dx = reinterpret_cast<void *>(dX);
auto dz = reinterpret_cast<void *>(dZ);
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
//sd::TypeCast::convertGenericCuda<sd::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//sd::TypeCast::convertGenericCuda<sd::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<sd::int8, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<sd::int8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<uint8_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<float16, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: .... ^^^
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<int16_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO...
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
} else {
printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<float, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//sd::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//sd::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//sd::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) {
auto u = new sd::utf8string(string, length);
return reinterpret_cast<Nd4jPointer>(u);
}
Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
return reinterpret_cast<sd::utf8string*>(ptr)->_length;
}
char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
return reinterpret_cast<sd::utf8string*>(ptr)->_buffer;
}
void deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
delete(reinterpret_cast<sd::utf8string*>(ptr));
}
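// utf8string lifecycle sketch (hypothetical caller; extraPointers is unused by these
// entry points, and we assume _length mirrors the input length):
//     Nd4jPointer s = createUtf8String(nullptr, "hello", 5);
//     auto len  = getUtf8StringLength(nullptr, s);   // 5
//     auto data = getUtf8StringBuffer(nullptr, s);
//     deleteUtf8String(nullptr, s);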
///////////////////////////////////////////////////////////////////
template<typename T, typename I>
__global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs,
void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong *xOffsets,
void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets,
const void* vindexes) {
__shared__ T *x, *y;
__shared__ Nd4jLong arrLenX, arrLenY;
auto indexes = reinterpret_cast<const I*>(vindexes);
for (int e = 0; e < numOfSubArrs; e++ ) {
const auto xIndex = indexes[e];
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T*>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T*>(vy) + yOffsets[e];
arrLenX = shape::length(xShapeInfo);
arrLenY = shape::length(yShapeInfo);
}
__syncthreads();
if (arrLenX != arrLenY)
return;
for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
switch (opCode) {
case 0:
x[xOffset] += y[yOffset];
break;
case 1:
x[xOffset] -= y[yOffset];
break;
case 2:
x[xOffset] *= y[yOffset];
break;
case 3:
x[xOffset] /= y[yOffset];
break;
case 4:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case 5:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case 6:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
}
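// opCode mapping used by the kernel above:
//   0: x += y    1: x -= y    2: x *= y    3: x /= y
//   4: x = y - x (reverse subtract)    5: x = y / x (reverse divide)
//   6: x = y (assign)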
template<typename T, typename I>
__host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* indexes) {
hipLaunchKernelGGL(( scatterUpdateCuda<T, I>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs,
void* hX, Nd4jLong const* hXShapeInfo, Nd4jLong const* hXOffsets,
void* dX, Nd4jLong const* dXShapeInfo, Nd4jLong const* dXOffsets,
void* hY, Nd4jLong const* hYShapeInfo, Nd4jLong const* hYOffsets,
void* dY, Nd4jLong const* dYShapeInfo, Nd4jLong const* dYOffsets,
void* hIindexes, Nd4jLong const* hIndicesShapeInfo, void* dIindexes, Nd4jLong const* dIndicesShapeInfo) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto type = ArrayOptions::dataType(hXShapeInfo);
auto iType = ArrayOptions::dataType(hIndicesShapeInfo);
BUILD_DOUBLE_SELECTOR(type, iType, scatterUpdateCudaLauncher,
(stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes),
LIBND4J_TYPES, INDEXING_TYPES);
sd::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) {
try {
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
auto p = reinterpret_cast<sd::DebugInfo *>(debugInfo);
NDArray array(buffer, specialBuffer, shapeInfo, &lc);
sd::DebugHelper::retrieveDebugStatistics(p, &array);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
__global__ void tryPointerKernel(void* p, int len) {
auto buf = reinterpret_cast<int8_t*>(p);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    // shared accumulator must be zeroed before use
    __shared__ int b;
    if (threadIdx.x == 0)
        b = 0;
    __syncthreads();
    if (tid < len)
        atomicAdd(&b, buf[tid]);
    __syncthreads();
    if (threadIdx.x == 0 && blockIdx.x == 0)
        printf("Pointer check complete: %i\n", b);
}
void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) {
try {
hipStream_t stream;
hipStreamCreate(&stream);
hipLaunchKernelGGL(( tryPointerKernel) , dim3(256), dim3(512), len + 64, stream, p, len);
auto e = hipStreamSynchronize(stream);
if (e != 0)
throw sd::cuda_exception::build("tryPointer failed", e);
hipStreamDestroy(stream);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
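// tryPointer is a diagnostic: it spins up a throwaway stream, has the kernel above
// touch every byte of `p`, and synchronizes, so an invalid or host-only pointer
// surfaces as a CUDA error here instead of a silent crash later.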
int dataTypeFromNpyHeader(void *header) {
return (int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header));
}
OpaqueConstantShapeBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, sd::DataType dtype, char order, Nd4jLong ews, bool empty) {
try {
auto buffer = new ConstantShapeBuffer();
*buffer = sd::ConstantShapeHelper::getInstance().bufferForShapeInfo(
ShapeDescriptor(dtype, order, shape, strides, rank, ews, empty));
return buffer;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
void deleteConstantShapeBuffer(OpaqueConstantShapeBuffer* ptr) {
delete ptr;
}
void deleteConstantDataBuffer(OpaqueConstantDataBuffer* ptr) {
delete ptr;
}
void deleteTadPack(sd::TadPack* ptr) {
delete ptr;
}
bool isBlasVersionMatches(int major, int minor, int build) {
auto result = major == Environment::getInstance()._blasMajorVersion && minor == Environment::getInstance()._blasMinorVersion && build == Environment::getInstance()._blasPatchVersion;
if (!result) {
nd4j_printf("CUDA/cuBLAS version mismatch. Expected: %i.%i.%i but got %i.%i.%i instead\n", Environment::getInstance()._blasMajorVersion, Environment::getInstance()._blasMinorVersion, Environment::getInstance()._blasPatchVersion, major, minor, build);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(152);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch");
}
return result;
}
sd::ConstantDataBuffer* constantBufferLong(sd::DataType dtype, Nd4jLong const* data, int length) {
return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}
sd::ConstantDataBuffer* constantBufferDouble(sd::DataType dtype, double *data, int length) {
return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}
sd::ConstantDataBuffer* constantBuffer(sd::DataType dtype, sd::ConstantDescriptor *descriptor) {
return sd::ConstantHelper::getInstance().constantBuffer(*descriptor, dtype);
}
Nd4jPointer getConstantDataBufferPrimary(sd::ConstantDataBuffer* dbf) {
return dbf->primary();
}
Nd4jPointer getConstantDataBufferSpecial(sd::ConstantDataBuffer* dbf) {
return dbf->special();
}
Nd4jLong getConstantDataBufferLength(sd::ConstantDataBuffer* dbf) {
return dbf->length();
}
Nd4jLong getConstantDataBufferSizeOf(sd::ConstantDataBuffer* dbf) {
return dbf->sizeOf();
}
Nd4jPointer getConstantShapeBufferPrimary(sd::ConstantShapeBuffer* dbf) {
return const_cast<Nd4jLong*>(dbf->primary());
}
Nd4jPointer getConstantShapeBufferSpecial(sd::ConstantShapeBuffer* dbf) {
return const_cast<Nd4jLong*>(dbf->special());
}
sd::graph::Context* createGraphContext(int nodeId) {
return new sd::graph::Context(nodeId);
}
sd::graph::RandomGenerator* getGraphContextRandomGenerator(sd::graph::Context* ptr) {
return &ptr->randomGenerator();
}
void markGraphContextInplace(sd::graph::Context* ptr, bool reallyInplace) {
ptr->markInplace(reallyInplace);
}
void setGraphContextCudaContext(sd::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) {
ptr->setCudaContext(stream, reductionPointer, allocationPointer);
}
void setGraphContextInputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) {
ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
void setGraphContextOutputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) {
ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
void setGraphContextInputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) {
ptr->setInputArray(index, buffer, shapeInfo, specialShapeInfo);
}
void setGraphContextOutputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) {
ptr->setOutputArray(index, buffer, shapeInfo, specialShapeInfo);
}
void setGraphContextTArguments(sd::graph::Context* ptr, double *arguments, int numberOfArguments) {
ptr->setTArguments(arguments, numberOfArguments);
}
void setGraphContextIArguments(sd::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) {
ptr->setIArguments(arguments, numberOfArguments);
}
void setGraphContextBArguments(sd::graph::Context* ptr, bool *arguments, int numberOfArguments) {
ptr->setBArguments(arguments, numberOfArguments);
}
void setGraphContextDArguments(OpaqueContext* ptr, int *arguments, int numberOfArguments) {
std::vector<sd::DataType> dtypes(numberOfArguments);
for (int e = 0; e < numberOfArguments; e++)
dtypes[e] = (sd::DataType) arguments[e];
ptr->setDArguments(dtypes);
}
void deleteGraphContext(sd::graph::Context* ptr) {
delete ptr;
}
sd::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) {
try {
return new sd::graph::RandomGenerator(rootSeed, nodeSeed);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getRandomGeneratorRootState(sd::graph::RandomGenerator* ptr) {
return ptr->rootState();
}
Nd4jLong getRandomGeneratorNodeState(sd::graph::RandomGenerator* ptr) {
return ptr->nodeState();
}
void setRandomGeneratorStates(sd::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) {
ptr->setStates(rootSeed, nodeSeed);
}
float getRandomGeneratorRelativeFloat(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeT<float>(index);
}
double getRandomGeneratorRelativeDouble(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeT<double>(index);
}
int getRandomGeneratorRelativeInt(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeInt(index);
}
Nd4jLong getRandomGeneratorRelativeLong(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeLong(index);
}
void deleteRandomGenerator(sd::graph::RandomGenerator* ptr) {
delete ptr;
}
Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) {
try {
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
unsigned int shapeSize = arr.shape.size();
std::vector<Nd4jLong> shape(shapeSize);
bool _empty = false;
for (unsigned int i = 0; i < shapeSize; i++) {
shape[i] = arr.shape[i];
if (arr.shape[i] == 0)
_empty = true;
}
auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray));
Nd4jLong *shapeBuffer;
if (shape.size() == 1 && shape[0] == 0) {
// scalar case
shapeBuffer = sd::ShapeBuilders::createScalarShapeInfo(dtype);
} else if (_empty) {
if (shapeSize > 0)
shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
else
shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype);
} else {
shapeBuffer = sd::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
}
        return (Nd4jPointer)(sd::ConstantShapeHelper::getInstance().createFromExisting(shapeBuffer, true)); // TODO: this can lead to an unpleasant crash sometimes
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
const char* runLightBenchmarkSuit(bool printOut) {
try {
sd::LightBenchmarkSuit suit;
auto result = suit.runSuit();
if (printOut)
nd4j_printf("%s\n", result.data());
auto chars = new char[result.length() + 1];
std::memcpy(chars, result.data(), result.length());
chars[result.length()] = (char) 0x0;
return chars;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
const char* runFullBenchmarkSuit(bool printOut) {
try {
sd::FullBenchmarkSuit suit;
auto result = suit.runSuit();
if (printOut)
nd4j_printf("%s\n", result.data());
auto chars = new char[result.length() + 1];
std::memcpy(chars, result.data(), result.length());
chars[result.length()] = (char) 0x0;
return chars;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
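// Ownership note for both benchmark entry points above: the returned string is allocated
// with new[], so a caller-side sketch looks like:
//
//   auto report = runLightBenchmarkSuit(false);
//   if (report != nullptr) {
//       // ... consume the report ...
//       delete[] report;
//   }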
Nd4jLong getCachedMemory(int deviceId) {
return sd::ConstantHelper::getInstance().getCachedAmount(deviceId);
}
sd::LaunchContext* defaultLaunchContext() {
return LaunchContext::defaultContext();
}
Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) {
return lc->getScalarPointer();
}
Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) {
return lc->getReductionPointer();
}
Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) {
return lc->getAllocationPointer();
}
Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) {
return lc->getCudaStream();
}
Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) {
return lc->getCudaSpecialStream();
}
Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) {
return lc->getCublasHandle();
}
Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) {
return lc->getCusolverHandle();
}
int lastErrorCode() {
return sd::LaunchContext::defaultContext()->errorReference()->errorCode();
}
const char* lastErrorMessage() {
return sd::LaunchContext::defaultContext()->errorReference()->errorMessage();
}
void ctxShapeFunctionOverride(OpaqueContext* ptr, bool reallyOverride) {
ptr->setShapeFunctionOverride(reallyOverride);
}
void ctxPurge(OpaqueContext* ptr) {
ptr->clearFastPath();
}
int binaryLevel() {
return 0;
}
int optimalLevel() {
return 0;
}
bool isMinimalRequirementsMet() {
return true;
}
bool isOptimalRequirementsMet() {
return true;
}
void ctxAllowHelpers(OpaqueContext* ptr, bool reallyAllow) {
ptr->allowHelpers(reallyAllow);
}
void ctxSetExecutionMode(OpaqueContext* ptr, int execMode) {
if (execMode < 0 || execMode > 2)
execMode = 0;
ptr->setExecutionMode((samediff::ExecutionMode) execMode);
}
OpaqueDataBuffer* dbCreateExternalDataBuffer(Nd4jLong elements, int dataType, Nd4jPointer primary, Nd4jPointer special) {
auto buffer = dbAllocateDataBuffer(0, dataType, false);
if (primary != nullptr)
buffer->setPrimary(primary, elements);
if (special != nullptr)
buffer->setSpecial(special, elements);
return buffer;
}
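// Hedged sketch of wrapping externally owned memory (`hostPtr` and `devPtr` are
// hypothetical allocations owned by the caller; the wrapper does not take ownership):
//
//   auto db = dbCreateExternalDataBuffer(256, (int) sd::DataType::FLOAT32, hostPtr, devPtr);
//   // dbPrimaryBuffer(db) == hostPtr, dbSpecialBuffer(db) == devPtr
//   deleteDataBuffer(db);   // releases the wrapper only, not hostPtr/devPtr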
OpaqueDataBuffer* dbAllocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) {
return allocateDataBuffer(elements, dataType, allocateBoth);
}
OpaqueDataBuffer* allocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) {
try {
auto dtype = DataTypeUtils::fromInt(dataType);
return new sd::InteropDataBuffer(elements * DataTypeUtils::sizeOf(dtype), dtype, allocateBoth);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jPointer dbPrimaryBuffer(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->primary();
}
Nd4jPointer dbSpecialBuffer(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->special();
}
void deleteDataBuffer(OpaqueDataBuffer *dataBuffer) {
delete dataBuffer;
}
void dbSetPrimaryBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer primaryBuffer, Nd4jLong numBytes) {
dataBuffer->setPrimary(primaryBuffer, numBytes);
}
void dbSetSpecialBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer specialBuffer, Nd4jLong numBytes) {
dataBuffer->setSpecial(specialBuffer, numBytes);
}
void dbAllocatePrimaryBuffer(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->allocatePrimary();
}
void dbAllocateSpecialBuffer(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->allocateSpecial();
}
void dbExpandBuffer(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) {
try {
dataBuffer->dataBuffer()->expand(elements * DataTypeUtils::sizeOf(dataBuffer->dataBuffer()->getDataType()));
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
OpaqueDataBuffer* dbCreateView(OpaqueDataBuffer *dataBuffer, Nd4jLong length, Nd4jLong offset) {
return new InteropDataBuffer(*dataBuffer, length, offset);
}
void dbSyncToSpecial(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->syncToSpecial();
}
void dbSyncToPrimary(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->syncToPrimary(nullptr);
}
void dbTickHostRead(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->readPrimary();
}
void dbTickHostWrite(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->writePrimary();
}
void dbTickDeviceRead(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->readSpecial();
}
void dbTickDeviceWrite(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->writeSpecial();
}
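// The tick calls above mark one side (host or device) as freshly read or written, and the
// sync calls copy data across. A minimal sketch of a host-side update, assuming `db` wraps
// an array used by a subsequent kernel:
//
//   dbTickHostWrite(db);    // mark the host copy as the most recent one
//   // ... mutate dbPrimaryBuffer(db) on the host ...
//   dbSyncToSpecial(db);    // push the host copy to the device before a kernel reads it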
void dbExpand(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) {
dataBuffer->expand(elements);
}
void dbClose(OpaqueDataBuffer *dataBuffer) {
dataBuffer->getDataBuffer()->close();
}
int dbDeviceId(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->deviceId();
}
void dbSetDeviceId(OpaqueDataBuffer *dataBuffer, int deviceId) {
dataBuffer->setDeviceId(deviceId);
}
int dbLocality(OpaqueDataBuffer *dataBuffer) {
auto p = dataBuffer->dataBuffer()->isPrimaryActual();
auto d = dataBuffer->dataBuffer()->isSpecialActual();
if (p && d)
return 0;
else if (p)
return -1;
else
return 1;
} | e9fca4c03c50125ae38a8538f1f4697118eba2cf.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <legacy/NativeOpExecutioner.h>
#include <legacy/NativeOps.h>
#include <cuda.h>
#include <system/buffer.h>
#include <loops/transform_any.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
#include <loops/scalar.h>
#include <helpers/threshold.h>
#include <ops/specials_cuda.h>
#include <helpers/DebugHelper.h>
#include <execution/AffinityManager.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <helpers/CudaLaunchHelper.h>
#include <graph/GraphExecutioner.h>
#include <helpers/BlasHelper.h>
#include <graph/GraphHolder.h>
#include <ops/declarable/CustomOperations.h>
#include <helpers/PointersManager.h>
//#include <sys/time.h>
#include <curand.h>
#include <graph/Status.h>
using namespace sd;
#include <loops/special_kernels.h>
#include <performance/benchmarking/FullBenchmarkSuit.h>
#include <performance/benchmarking/LightBenchmarkSuit.h>
cudaDeviceProp *deviceProperties;
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __ND4J_EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
// this method just does type conversion in a fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
/*
 * This method returns the shared memory threshold value; the default overflow ratio is 0.3
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
    // please note: the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
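// Worked example for the ratio above: a cc 6.0 device starts from 65536 bytes, so this
// method reports 65536 / 0.3, i.e. roughly 218453 bytes.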
sd::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) {
auto scalarShapeInfo = shape::createScalarShapeInfo();
auto buff = sd::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
sd::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
sd::buffer::Buffer<Nd4jLong> *scalarDimension;
sd::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(cudaStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong));
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = sd::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
sd::buffer::freeBuffer(&scalarShapeInfo);
sd::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
sd::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
ScalarInfo(cudaStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = sd::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
sd::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
sd::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the dZ pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
sd::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
void execPairwiseTransform( Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execPairwiseTransformBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execSummaryStatsScalar(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execBroadcastBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcastBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void execBroadcast(
Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcast(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
void execReduceFloat(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceFloatScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceSame(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSameScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceSame2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
const Nd4jLong* zShapeInfoH = hZShapeInfo;
if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary());
}
std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSame(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceLong2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
const Nd4jLong* zShapeInfoH = hZShapeInfo;
if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary());
}
std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceLong(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceLong(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::INT64)
throw datatype_exception::build("execReduceLong wrong Z data type", sd::DataType::INT64, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction,
::execReduceScalar(launchDims, stream, opNum,
dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
extraParams,
                                      dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceBool2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
const Nd4jLong* zShapeInfoH = hZShapeInfo;
if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary());
}
std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::BOOL)
throw std::runtime_error("execReduceBool requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction,
::execReduceScalar(launchDims, stream, opNum,
dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
extraParams,
dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
////////////////////////////////////////////////////////////////////////
void execIndexReduce(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduce(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
(int *) dbDimension->special(), dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
void execReduceFloat2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
const Nd4jLong* zShapeInfoH = hZShapeInfo;
if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary());
}
std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceFloat(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
////////////////////////////////////////////////////////////////////////
void execIndexReduceScalar(
Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo){
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduceScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformSame(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformSame(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformBool(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformAny(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto streamSpecial = reinterpret_cast<cudaStream_t &>(extraPointers[4]);
LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3],
reinterpret_cast<int *>(extraPointers[6]));
NativeOpExecutioner::execTransformAny(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
nullptr, nullptr);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformStrict(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformStrict(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformFloat(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformFloat(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void checkP2P() {
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
cudaSetDevice(dX);
cudaDeviceCanAccessPeer(&canAccess, dX , dY);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
cudaSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
cudaSetDevice(dX);
cudaDeviceCanAccessPeer(&canAccess, dX , dY);
if (canAccess) {
if (enable) {
cudaDeviceEnablePeerAccess(dY, 0);
} else {
cudaDeviceDisablePeerAccess(dY);
}
} else {
if (sd::Environment::getInstance().isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
}
}
}
cudaSetDevice(curDevice);
}
allowedP2P = enable;
cudaSetDevice(curDevice);
}
bool isP2PAvailable() {
return supportedP2P;
}
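// How these three helpers fit together, as a hedged sketch (typically driven right after
// device initialization, as initializeDevicesAndFunctions below does):
//
//   checkP2P();             // probes pairwise peer access and fills supportedP2P
//   if (isP2PAvailable())
//       enableP2P(true);    // enables peer access for every reachable device pair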
void initializeDevicesAndFunctions() {
try {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
deviceProperties = new cudaDeviceProp[devCnt];
for (int i = 0; i < devCnt; i++) {
cudaSetDevice(i);
cudaGetDeviceProperties(&deviceProperties[i], i);
cudaDeviceSetLimit(cudaLimitStackSize, 4096);
}
cudaSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void initializeFunctions(Nd4jPointer *functions) {
sd::BlasHelper::getInstance().initializeDeviceFunctions(functions);
/*
cublasSgemv = (CublasSgemv)functions[0];
cublasDgemv = (CublasDgemv)functions[1];
cublasHgemm = (CublasHgemm)functions[2];
cublasSgemm = (CublasSgemm)functions[3];
cublasDgemm = (CublasDgemm)functions[4];
cublasSgemmEx = (CublasSgemmEx)functions[5];
cublasHgemmBatched = (CublasHgemmBatched)functions[6];
cublasSgemmBatched = (CublasSgemmBatched)functions[7];
cublasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// cudaHostAllocMapped |cudaHostAllocPortable
auto res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize + 8, cudaHostAllocDefault);
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaHostAlloc failed");
}
return reinterpret_cast<int8_t*>(pointer);
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's a pointer to device_id, etc
* @param flags optional parameter
*/
Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) {
Nd4jPointer pointer;
auto res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8);
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMalloc failed");
}
return reinterpret_cast<int8_t*>(pointer);
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int freeHost(Nd4jPointer pointer) {
auto res = cudaFreeHost(reinterpret_cast<void *>(pointer));
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFreeHost failed");
}
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int freeDevice(Nd4jPointer pointer, int deviceId) {
auto res = cudaFree(reinterpret_cast<void *>(pointer));
    // we're intentionally skipping error code 1 here: it's treated as non-fatal
if (res != 0 && res != 1) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFree failed");
}
return res == 0 ? 1L : 0L;
}
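// Allocation round-trip sketch (sizes are hypothetical; the flags argument is currently
// unused by both allocators):
//
//   auto hPtr = mallocHost(1024, 0);
//   auto dPtr = mallocDevice(1024, 0 /* deviceId */, 0);
//   // ... use the buffers ...
//   freeHost(hPtr);
//   freeDevice(dPtr, 0);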
Nd4jPointer createContext() {
return 0L;
}
Nd4jPointer createStream() {
auto stream = new cudaStream_t();
auto dZ = cudaStreamCreate(stream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamCreate failed");
}
return stream;
}
Nd4jPointer createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(cudaEvent_t));
auto dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventCreateWithFlags failed");
}
return nativeEvent;
}
int registerEvent(Nd4jPointer event, Nd4jPointer stream) {
auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
auto pStream = reinterpret_cast<cudaStream_t *>(stream);
auto dZ = cudaEventRecord(*pEvent, *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventRecord failed");
}
return 1;
}
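// Stream/event interplay, as a minimal sketch (eventSynchronize and destroyEvent are
// defined further below in this file):
//
//   auto stream = createStream();
//   auto event  = createEvent();
//   // ... enqueue async work on `stream` ...
//   registerEvent(event, stream);   // record the event after the queued work
//   eventSynchronize(event);        // block until that point in the stream is reached
//   destroyEvent(event);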
int setDevice(int deviceId) {
AffinityManager::setCurrentDevice(deviceId);
return 1;
}
Nd4jLong getDeviceFreeMemoryDefault() {
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
return (Nd4jLong) memFree;
}
Nd4jLong getDeviceFreeMemory(int device) {
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong getDeviceTotalMemory(int device) {
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaMemcpyKind kind;
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
default: {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
            sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
return 0;
}
}
auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
if (dZ != 0) {
        printf("Failed on [%p] -> [%p], size: [%lld], direction: [%i], dZ: [%i]\n", src, dst, static_cast<long long>(size), flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpy failed");
return 0;
}
return 1;
}
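// The direction flags used by memcpySync/memcpyAsync map to cudaMemcpyKind as follows:
// 0 = host->host, 1 = host->device, 2 = device->host, 3 = device->device. For example,
// a host-to-device copy would be: memcpySync(devPtr, hostPtr, bytes, 1, nullptr);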
int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto pStream = reinterpret_cast<cudaStream_t *>(reserved);
cudaMemcpyKind kind;
//sd::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed");
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
default: {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
            sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
return 0;
}
}
auto dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
//auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
if (dZ != 0) {
        printf("Failed on [%p] -> [%p], size: [%lld], direction: [%i], dZ: [%i]\n", src, dst, static_cast<long long>(size), flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyAsync failed");
return 0;
}
return 1;
}
int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemset failed");
}
return 1;
}
int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto pStream = reinterpret_cast<cudaStream_t *>(reserved);
auto dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemsetAsync failed");
}
return 1;
}
int destroyEvent(Nd4jPointer event) {
auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
auto dZ = cudaEventDestroy(*pEvent);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventDestroy failed");
}
return 1;
}
int streamSynchronize(Nd4jPointer stream) {
auto pStream = reinterpret_cast<cudaStream_t *>(stream);
auto dZ = cudaStreamSynchronize(*pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamSynchronize failed");
}
return 1L;
}
int eventSynchronize(Nd4jPointer event) {
auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
auto dZ = cudaEventSynchronize(*pEvent);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventSynchronize failed");
}
return 1L;
}
int getAvailableDevices() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
return devCnt;
}
void enableDebugMode(bool reallyEnable) {
sd::Environment::getInstance().setDebug(reallyEnable);
}
void setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int ompGetMaxThreads() {
return maxThreads;
}
int ompGetNumThreads() {
return maxThreads;
}
void setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void enableVerboseMode(bool reallyEnable) {
sd::Environment::getInstance().setVerbose(reallyEnable);
}
int getDeviceMajor(int device) {
return deviceProperties[device].major;
}
int getDeviceMinor(int device) {
return deviceProperties[device].minor;
}
const char * getDeviceName(int device) {
return deviceProperties[device].name;
}
void specialConcat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
void *dZ,
Nd4jLong const* dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
try {
BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), sd::SpecialMethods,
::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo),
LIBND4J_TYPES);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
 * This method builds a TAD (tensor-along-dimension) pack for the given shape and dimensions
*/
sd::TadPack* tadOnlyShapeInfo(Nd4jLong const* dXShapeInfo, int *dimension, int dimensionLength) {
try {
auto pack = new TadPack();
*pack = sd::ConstantTadHelper::getInstance().tadForDimensions(dXShapeInfo, dimension, dimensionLength);
return pack;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong const* getPrimaryShapeInfo(sd::TadPack* pack) {
return pack->primaryShapeInfo();
}
Nd4jLong const* getPrimaryOffsets(sd::TadPack* pack) {
return pack->primaryOffsets();
}
Nd4jLong const* getSpecialShapeInfo(sd::TadPack* pack) {
return pack->specialShapeInfo();
}
Nd4jLong const* getSpecialOffsets(sd::TadPack* pack) {
return pack->specialOffsets();
}
Nd4jLong getNumberOfTads(sd::TadPack* pack) {
return pack->numberOfTads();
}
int getShapeInfoLength(sd::TadPack* pack) {
return pack->shapeInfoLength();
}
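// TAD pack usage sketch (shape/dimension inputs are hypothetical; the caller owns the
// returned pack):
//
//   int dims[] = {1};
//   auto pack = tadOnlyShapeInfo(hXShapeInfo, dims, 1);
//   if (pack != nullptr) {
//       auto numTads    = getNumberOfTads(pack);
//       auto dTadShapes = getSpecialShapeInfo(pack);
//       auto dTadOffs   = getSpecialOffsets(pack);
//       // pass dTadShapes/dTadOffs to a kernel launch, then release the pack caller-side
//   }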
int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(reserved);
cudaMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
        }
        break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
        break;
        default: {
            sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
            sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
            return 0;
        }
    }
auto dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyToSymbolAsync failed");
}
return 1;
}
Nd4jPointer getConstantSpace() {
Nd4jPointer dConstAddr;
cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaGetSymbolAddress failed");
}
return dConstAddr;
}
void pullRows(Nd4jPointer *extraPointers,
OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* zShapeInfo, Nd4jLong const* dZShapeInfo,
Nd4jLong n,
Nd4jLong *indexes,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets,
Nd4jLong const* zTadShapeInfo,
Nd4jLong const* zTadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
dim3 launchDims(64, 256, 1024);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric,
(launchDims, stream, dbX->special(), dbZ->special(), n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets),
LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
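// pullRows gathers `n` TADs from the source into the target by index. As a
// hedged example: for a 3x4 row-major matrix with indexes = {2, 0}, the output
// receives row 2 followed by row 0, with the TAD shape/offset buffers
// describing the per-row views.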
void average(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong const* xShapeInfo,
Nd4jPointer *dx, Nd4jLong const* dXShapeInfo,
void *z, Nd4jLong const* zShapeInfo,
void *dz, Nd4jLong const* dzShapeInfo,
int n,
Nd4jLong length,
bool propagate) {
try {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("averageFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(256, 256, 4096);
BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate),
LIBND4J_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void accumulate(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong const* xShapeInfo,
Nd4jPointer *dx, Nd4jLong const* dXShapeInfo,
void *z, Nd4jLong const* zShapeInfo,
void *dz, Nd4jLong const* dzShapeInfo,
int n,
Nd4jLong length) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("accumulateFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(n, 256, 16384);
BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length),
LIBND4J_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void shuffle(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jPointer *xShapeInfo,
Nd4jPointer *dx, Nd4jPointer *dXShapeInfo,
Nd4jPointer *z, Nd4jPointer *zShapeInfo,
Nd4jPointer *dz, Nd4jPointer *dZShapeInfo,
int N,
int *shuffleMap,
Nd4jPointer *tadShapeInfo,
Nd4jPointer *tadOffsets) {
try {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
auto dX = reinterpret_cast<void **>(dx);
auto dZ = reinterpret_cast<void **>(dz);
auto xShape = reinterpret_cast<Nd4jLong**>(xShapeInfo);
auto dxShape = reinterpret_cast<Nd4jLong**>(dXShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
auto xType = sd::ArrayOptions::dataType(xShape[0]);
dim3 launchDims(256, 512, 8192);
BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric,
(launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
bool isExperimentalEnabled() {
return sd::Environment::getInstance().isExperimentalBuild();
}
void setOmpMinThreads(int threads) {
minThreads = sd::math::nd4j_max<int>(32, threads);
minThreads = sd::math::nd4j_min<int>(maxThreads, minThreads);
}
int getDevice() {
return sd::AffinityManager::currentDeviceId();
}
void setElementThreshold(int num) {
// this is no-op for CUDA
}
void setTADThreshold(int num) {
// this is no-op for CUDA
}
////////////////////////////////////////////////////////////////////////
void execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStats(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execSummaryStatsTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
bool biasCorrected,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbDimension});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStats(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
reinterpret_cast<int *>(dbDimension->special()), dimensionLength,
tadShapeInfo, tadOffsets,
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbDimension});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3Tad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* yTadOnlyShapeInfo, Nd4jLong const* yTadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
auto tadLength = shape::length(tadPack.primaryShapeInfo());
auto yLength = shape::length(hYShapeInfo);
auto xLength = shape::length(hXShapeInfo);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
if (tadLength == yLength || tadLength == xLength) {
// nd4j_printf("== way\n","");
NativeOpExecutioner::execReduce3(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
} else
NativeOpExecutioner::execReduce3TAD(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3Scalar(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3Scalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarBoolTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dbScalars->primary(), hScalarShapeInfo, dbScalars->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(),
dimension, dimensionLength,
tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalar(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != sd::DataType::BOOL && !isExperimentalEnabled())
throw sd::datatype_exception::build("execScalarTad: both operands must have the same data type", xType, yType);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
// note: mirror the non-experimental call below, using the interop buffers actually in scope
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->special(), extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->special(), extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void execAggregate(Nd4jPointer *extraPointers,
int opNum,
void **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
void *realArguments,
int numRealArguments,
sd::DataType dtype) {
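// no-op: aggregate ops are not implemented in the CUDA backend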
}
void batchExecutor(Nd4jPointer *extraPointers,
int numAggregates,
int opNum,
int maxArgs,
int maxShapes,
int maxIntArrays,
int maxIntArraySize,
int maxIdx,
int maxReals,
void *ptrToArguments,
sd::DataType dtype) {
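// no-op: see execAggregate above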
}
void execAggregateBatch(Nd4jPointer *extraPointers,
int numAggregates, int opNum,
int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize,
int maxIdx, int maxReals,
void *ptrToArguments, sd::DataType dtype) {
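// no-op: see execAggregate above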
}
////////////////////////////////////////////////////////////////////////
void execRandom(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer stateHost,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execRandom3(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
// we don't synchronize at random initialization; it's safe to stay unsynchronized here
// cudaStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new sd::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate sequence in the host memory
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
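// Illustrative lifecycle sketch (pointer layout inferred from the code above:
// extraPointers[0] = host buffer, extraPointers[1] = cudaStream_t*; seeds and
// sizes are placeholders): create a generator, use it, then destroy it.
//
//   auto rng = initRandom(extraPointers, 119L, bufferSize, ptrToDeviceBuffer);
//   // ... pass `rng` as the state argument of execRandom(...) calls ...
//   refreshBuffer(extraPointers, 120L, rng); // re-seed and regenerate
//   destroyRandom(rng);                      // synchronizes the device first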
void destroyRandom(Nd4jPointer ptrBuffer) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrBuffer);
// FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice
cudaDeviceSynchronize();
delete buffer;
}
void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
cudaStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh buffer on host side
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream);
}
void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
cudaStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
int lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
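// For example, assuming the standard shape-info layout of rank, shape[rank],
// stride[rank], plus trailing metadata (2 * rank + 4 elements in total),
// a rank-2 buffer yields a length of 8.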
/**
* The pointer to get the address for
*
* @param address the address to get the pointer
* @return the pointer for the given address
*/
Nd4jPointer pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void tear(Nd4jPointer *extras,
OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo,
Nd4jPointer *targets,
Nd4jLong const* zShapeInfo,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({}, {dbX});
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
dim3 launchDims(512, 512, 512);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric,
(launchDims, stream, dbX->special(), dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
InteropDataBuffer::registerSpecialUse({}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
auto stream = reinterpret_cast<cudaStream_t *>(extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = sd::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (sd::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = sd::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = sd::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
sd::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
sd::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
sd::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
sd::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
sd::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
} else {
sd::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
}
sd::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed");
}
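// This follows the classic work-efficient exclusive-scan pattern: scan each
// block, scan the per-block sums recursively, then add the scanned sums back
// via uniformAdd. Worked example (illustrative): an exclusive prefix sum of
// [3, 1, 7, 0, 4, 1, 6, 3] yields [0, 3, 4, 11, 11, 15, 16, 22].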
////////////////////////////////////////////////////////////////////////
void execReduce3All(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParamsVals,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* xTadShapeInfo, Nd4jLong const* xOffsets,
Nd4jLong const* yTadShapeInfo, Nd4jLong const* yOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY, dbDimension});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3All(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParamsVals,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
reinterpret_cast<int *>(dbDimension->special()), dimensionLength,
xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sort(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
bool descending) {
try {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// if xLength is a power of 2 (and small enough), use the plain bitonic sort kernel
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric,
(launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2;
while (max < xLength) {
max <<= 1;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric,
(launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
sd::DebugHelper::checkErrorCode(stream, "sort(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
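// Illustrative trace of the power-of-2 branch for xLength == 8: the (k, j)
// kernel launches run as k=2: j=1; k=4: j=2,1; k=8: j=4,2,1, i.e. the
// standard bitonic sorting network of log2(n) merge stages.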
void sortByKey(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
bool descending) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto yLength = shape::length(yShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
auto yType = sd::ArrayOptions::dataType(yShapeInfo);
if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo))
return;
if (xLength != yLength)
throw std::runtime_error("sortByKey: keys and values must have the same size");
// if xLength is a power of 2 (and small enough), use the plain bitonic sort kernel
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2;
while (max < xLength) {
max <<= 1;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortByValue(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
bool descending) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto yLength = shape::length(yShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
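// note: key/value roles are deliberately swapped below, so the values array
// drives the comparisons and the keys are permuted along with it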
auto xType = sd::ArrayOptions::dataType(yShapeInfo);
auto yType = sd::ArrayOptions::dataType(xShapeInfo);
if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo))
return;
if (xLength != yLength)
throw std::runtime_error("sortByValue: keys and values must have the same size");
// if xLength is a power of 2 (and small enough), use the plain bitonic sort kernel
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2;
while (max < xLength) {
max <<= 1;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTadByKey(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
int *dimension,
int dimensionLength,
bool descending) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
auto yType = sd::ArrayOptions::dataType(yShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
LIBND4J_TYPES, LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTadByValue(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
int *dimension,
int dimensionLength,
bool descending) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048);
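// note: as in sortByValue, key/value roles are swapped so TADs are ordered by value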
auto xType = sd::ArrayOptions::dataType(yShapeInfo);
auto yType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
LIBND4J_TYPES, LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTad(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets,
bool descending) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 512, 32768);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, oesTadGeneric,
(launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTad(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) {
throw std::runtime_error("sortCooIndices:: Not implemented yet");
}
Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
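// memory-mapped files are not supported in the CUDA backend; callers receive nullptr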
return nullptr;
}
void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
sd::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
try {
return sd::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getResultWrapperSize(sd::graph::ResultWrapper* ptr) {
return ptr->size();
}
Nd4jPointer getResultWrapperPointer(sd::graph::ResultWrapper* ptr) {
return ptr->pointer();
}
const char* getAllCustomOps() {
return sd::ops::OpRegistrator::getInstance().getAllCustomOperations();
}
sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) {
sd::graph::VariableSpace varSpace;
Context block(2, &varSpace);
sd::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numBArgs; e++)
block.getBArguments()->push_back(bArgs[e]);
for (int e = 0; e < numDArgs; e++)
block.getDArguments()->push_back((sd::DataType) dArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
// we shouldn't copy the buffer if it's an empty array
void *buffer_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
void *bufferD_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputShapes];
auto array = new sd::NDArray(buffer_, bufferD_, shape_);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.launchContext()->getWorkspace() != nullptr)
shapeList->detach();
return shapeList;
}
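// Illustrative shape-inference sketch (the op hash and buffers are
// placeholders; per the loop above, inputBuffers carries host pointers first,
// then device pointers):
//
//   Nd4jPointer inShapes[]  = { (Nd4jPointer) xShapeInfo };
//   Nd4jPointer inBuffers[] = { (Nd4jPointer) xHost, (Nd4jPointer) xDevice };
//   auto shapes = calculateOutputShapes2(nullptr, opHash, inBuffers, inShapes, 1,
//                                        nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0);
//   auto n = getShapeListSize(shapes);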
sd::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) {
try {
auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs,
iArgs, numIArgs, bArgs, numBArgs, dArgs, numDArgs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
Context block(1);
sd::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
sd::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
try {
auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getShapeListSize(sd::ShapeList* list) {
return list->size();
}
Nd4jLong const* getShape(sd::ShapeList* list, Nd4jLong i) {
return list->at(i);
}
static FORCEINLINE Nd4jStatus realExec(sd::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
if (op == nullptr) {
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
return Status::THROW();
}
// we're using the same fake nodeId everywhere here
std::vector<sd::NDArray*> inputs(numInputs);
std::vector<sd::NDArray*> outputs(numOutputs);
std::vector<double> ttArgs(numTArgs);
std::vector<bool> bbArgs(numBArgs);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs];
inputs[e] = new sd::NDArray(buffer, bufferD, shape);
}
// if not inplace - transferring output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs];
// FIXME: revisit this.
bool canNullify = true;
for (int i = 0; i < numInputs; i++) {
void *ibuffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i];
if (ibuffer == buffer) {
canNullify = false;
break;
}
}
if (canNullify && buffer != nullptr)
memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)));
auto array = new sd::NDArray(buffer, bufferD, shape);
outputs[e] = array;
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
for (int e = 0; e < numBArgs; e++)
bbArgs[e] = bArgs[e];
// at this point everything should be filled in
auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, std::vector<sd::DataType>(), isInplace);
//auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
try {
auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) {
try {
auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
auto context = reinterpret_cast<Context *>(opContext);
auto result = op->execute(context);
auto res = cudaStreamSynchronize(*context->launchContext()->getCudaStream());
if (res != 0)
throw sd::cuda_exception::build("customOp execution failed", res);
for (auto v:context->fastpath_in()) {
if (!v->isEmpty())
v->syncToDevice();
}
for (auto v:context->fastpath_out()) {
if (!v->isEmpty())
v->syncToDevice();
}
return result;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
int registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
try {
auto graph = sd::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
sd::graph::GraphHolder::getInstance().registerGraph(graphId, graph);
return ND4J_STATUS_OK;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = sd::graph::GraphHolder::getInstance().pullGraph(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<sd::NDArray*> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new sd::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto dZ = sd::graph::GraphExecutioner::execute(graph, varSpace);
auto varSet = new sd::graph::VariablesSet(dZ);
if (dZ == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting the variable ID/index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
try {
return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getVariablesSetSize(sd::graph::VariablesSet* set) {
return set->size();
}
Nd4jStatus getVariablesSetStatus(sd::graph::VariablesSet* set) {
return set->status();
}
sd::graph::Variable* getVariable(sd::graph::VariablesSet* set, Nd4jLong i) {
return set->at(i);
}
int getVariableId(sd::graph::Variable* variable) {
return variable->id();
}
int getVariableIndex(sd::graph::Variable* variable) {
return variable->index();
}
const char* getVariableName(sd::graph::Variable* variable) {
return variable->getName()->c_str();
}
Nd4jLong const* getVariableShape(sd::graph::Variable* variable) {
return variable->getNDArray()->shapeInfo();
}
void* getVariableBuffer(sd::graph::Variable* variable) {
return variable->getNDArray()->buffer();
}
int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
try {
sd::graph::GraphHolder::getInstance().dropGraphAny(graphId);
return ND4J_STATUS_OK;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
void deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void deleteCharArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<char *>(pointer);
delete[] ptr;
}
void deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
void deleteVariablesSet(sd::graph::VariablesSet* pointer) {
delete pointer;
}
void deleteShapeList(Nd4jPointer shapeList) {
sd::ShapeList* list = reinterpret_cast<sd::ShapeList*>(shapeList);
//list->destroy();
delete list;
}
const char* getAllOperations() {
return sd::OpTracker::getInstance().exportOperations();
}
Nd4jPointer getGraphState(Nd4jLong id) {
return (Nd4jPointer) new sd::graph::GraphState(id);
}
void deleteGraphState(Nd4jPointer state) {
auto stateP = reinterpret_cast<sd::graph::GraphState*>(state);
delete stateP;
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, sd::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
 * That's basically exec, with the VariableSpace provided in GraphState:
 * depending on the operation (i.e. While or If), different logic executors could be used
 */
auto graph = state->graph();
auto varSpace = state->variableSpace();
// the Node is created dynamically and carries nothing beyond its inputs and outputs;
// it has an id of 0, and its inputs are mapped below from the provided buffers
Node node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = inputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new sd::NDArray(buffer, shapeInfo, varSpace->launchContext());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
// nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto dZ = LogicExecutor::processNode(graph, &node);
if (dZ != Status::OK())
return dZ;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = outputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
NDArray array(buffer, shapeInfo, varSpace->launchContext());
// now we just put array to VarSpace to the same ID
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// at this point the Graph and Node for the current op have been executed and the outputs copied back
return Status::OK();
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
try {
return execCustomOpWithScope(extraPointers, reinterpret_cast<sd::graph::GraphState *>(state), opHash, scopes,
numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
numOutputs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
void deleteResultWrapper(Nd4jPointer ptr) {
// reinterpret and delete; nothing else to clean up here
auto p = reinterpret_cast<sd::graph::ResultWrapper *>(ptr);
delete p;
}
int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong const* dXShapeInfo, int N, float threshold) {
throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ);
*/
void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) {
try {
auto dx = reinterpret_cast<void *>(dX);
auto dz = reinterpret_cast<void *>(dZ);
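// Pairwise dispatch on (srcType, dstType): supported pairs call the templated
// convertGenericCuda kernel; unimplemented pairs are commented out or warn below.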
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
//sd::TypeCast::convertGenericCuda<sd::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//sd::TypeCast::convertGenericCuda<sd::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<sd::int8, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<sd::int8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<uint8_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<float16, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: FLOAT24 conversion not implemented (same as above)
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<int16_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: FLOAT24 conversion not implemented
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<float, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//sd::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//sd::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//sd::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) {
auto u = new sd::utf8string(string, length);
return reinterpret_cast<Nd4jPointer>(u);
}
Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
return reinterpret_cast<sd::utf8string*>(ptr)->_length;
}
char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
return reinterpret_cast<sd::utf8string*>(ptr)->_buffer;
}
void deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
delete(reinterpret_cast<sd::utf8string*>(ptr));
}
///////////////////////////////////////////////////////////////////
template<typename T, typename I>
__global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs,
void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong *xOffsets,
void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets,
const void* vindexes) {
__shared__ T *x, *y;
__shared__ Nd4jLong arrLenX, arrLenY;
auto indexes = reinterpret_cast<const I*>(vindexes);
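// Each sub-array update is handled by the single block whose index matches
// indexes[e] modulo gridDim.x, which serializes updates targeting the same x sub-array.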
for (int e = 0; e < numOfSubArrs; e++ ) {
const auto xIndex = indexes[e];
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T*>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T*>(vy) + yOffsets[e];
arrLenX = shape::length(xShapeInfo);
arrLenY = shape::length(yShapeInfo);
}
__syncthreads();
if (arrLenX != arrLenY)
return;
for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
switch (opCode) {
case 0:
x[xOffset] += y[yOffset];
break;
case 1:
x[xOffset] -= y[yOffset];
break;
case 2:
x[xOffset] *= y[yOffset];
break;
case 3:
x[xOffset] /= y[yOffset];
break;
case 4:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case 5:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case 6:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
}
template<typename T, typename I>
__host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* indexes) {
scatterUpdateCuda<T, I><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs,
void* hX, Nd4jLong const* hXShapeInfo, Nd4jLong const* hXOffsets,
void* dX, Nd4jLong const* dXShapeInfo, Nd4jLong const* dXOffsets,
void* hY, Nd4jLong const* hYShapeInfo, Nd4jLong const* hYOffsets,
void* dY, Nd4jLong const* dYShapeInfo, Nd4jLong const* dYOffsets,
void* hIindexes, Nd4jLong const* hIndicesShapeInfo, void* dIindexes, Nd4jLong const* dIndicesShapeInfo) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto type = ArrayOptions::dataType(hXShapeInfo);
auto iType = ArrayOptions::dataType(hIndicesShapeInfo);
BUILD_DOUBLE_SELECTOR(type, iType, scatterUpdateCudaLauncher,
(stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes),
LIBND4J_TYPES, INDEXING_TYPES);
sd::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) {
try {
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
auto p = reinterpret_cast<sd::DebugInfo *>(debugInfo);
NDArray array(buffer, specialBuffer, shapeInfo, &lc);
sd::DebugHelper::retrieveDebugStatistics(p, &array);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
__global__ void tryPointerKernel(void* p, int len) {
auto buf = reinterpret_cast<int8_t*>(p);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int b;
if (threadIdx.x == 0)
b = 0; // the shared accumulator must be zeroed before any atomicAdd touches it
__syncthreads();
if (tid < len)
atomicAdd(&b, buf[tid]);
__syncthreads();
if (threadIdx.x == 0 && blockIdx.x == 0)
printf("Pointer check complete: %i\n", b);
}
void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) {
try {
cudaStream_t stream;
cudaStreamCreate(&stream);
tryPointerKernel <<< 256, 512, len + 64, stream>>> (p, len);
auto e = cudaStreamSynchronize(stream);
if (e != 0)
throw sd::cuda_exception::build("tryPointer failed", e);
cudaStreamDestroy(stream);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
int dataTypeFromNpyHeader(void *header) {
return (int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header));
}
OpaqueConstantShapeBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, sd::DataType dtype, char order, Nd4jLong ews, bool empty) {
try {
auto buffer = new ConstantShapeBuffer();
*buffer = sd::ConstantShapeHelper::getInstance().bufferForShapeInfo(
ShapeDescriptor(dtype, order, shape, strides, rank, ews, empty));
return buffer;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
void deleteConstantShapeBuffer(OpaqueConstantShapeBuffer* ptr) {
delete ptr;
}
void deleteConstantDataBuffer(OpaqueConstantDataBuffer* ptr) {
delete ptr;
}
void deleteTadPack(sd::TadPack* ptr) {
delete ptr;
}
bool isBlasVersionMatches(int major, int minor, int build) {
auto result = major == Environment::getInstance()._blasMajorVersion && minor == Environment::getInstance()._blasMinorVersion && build == Environment::getInstance()._blasPatchVersion;
if (!result) {
nd4j_printf("CUDA/cuBLAS version mismatch. Expected: %i.%i.%i but got %i.%i.%i instead\n", Environment::getInstance()._blasMajorVersion, Environment::getInstance()._blasMinorVersion, Environment::getInstance()._blasPatchVersion, major, minor, build);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(152);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch");
}
return result;
}
sd::ConstantDataBuffer* constantBufferLong(sd::DataType dtype, Nd4jLong const* data, int length) {
return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}
sd::ConstantDataBuffer* constantBufferDouble(sd::DataType dtype, double *data, int length) {
return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}
sd::ConstantDataBuffer* constantBuffer(sd::DataType dtype, sd::ConstantDescriptor *descriptor) {
return sd::ConstantHelper::getInstance().constantBuffer(*descriptor, dtype);
}
Nd4jPointer getConstantDataBufferPrimary(sd::ConstantDataBuffer* dbf) {
return dbf->primary();
}
Nd4jPointer getConstantDataBufferSpecial(sd::ConstantDataBuffer* dbf) {
return dbf->special();
}
Nd4jLong getConstantDataBufferLength(sd::ConstantDataBuffer* dbf) {
return dbf->length();
}
Nd4jLong getConstantDataBufferSizeOf(sd::ConstantDataBuffer* dbf) {
return dbf->sizeOf();
}
Nd4jPointer getConstantShapeBufferPrimary(sd::ConstantShapeBuffer* dbf) {
return const_cast<Nd4jLong*>(dbf->primary());
}
Nd4jPointer getConstantShapeBufferSpecial(sd::ConstantShapeBuffer* dbf) {
return const_cast<Nd4jLong*>(dbf->special());
}
sd::graph::Context* createGraphContext(int nodeId) {
return new sd::graph::Context(nodeId);
}
sd::graph::RandomGenerator* getGraphContextRandomGenerator(sd::graph::Context* ptr) {
return &ptr->randomGenerator();
}
void markGraphContextInplace(sd::graph::Context* ptr, bool reallyInplace) {
ptr->markInplace(reallyInplace);
}
void setGraphContextCudaContext(sd::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) {
ptr->setCudaContext(stream, reductionPointer, allocationPointer);
}
void setGraphContextInputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) {
ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
void setGraphContextOutputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) {
ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
void setGraphContextInputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) {
ptr->setInputArray(index, buffer, shapeInfo, specialShapeInfo);
}
void setGraphContextOutputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) {
ptr->setOutputArray(index, buffer, shapeInfo, specialShapeInfo);
}
void setGraphContextTArguments(sd::graph::Context* ptr, double *arguments, int numberOfArguments) {
ptr->setTArguments(arguments, numberOfArguments);
}
void setGraphContextIArguments(sd::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) {
ptr->setIArguments(arguments, numberOfArguments);
}
void setGraphContextBArguments(sd::graph::Context* ptr, bool *arguments, int numberOfArguments) {
ptr->setBArguments(arguments, numberOfArguments);
}
void setGraphContextDArguments(OpaqueContext* ptr, int *arguments, int numberOfArguments) {
std::vector<sd::DataType> dtypes(numberOfArguments);
for (int e = 0; e < numberOfArguments; e++)
dtypes[e] = (sd::DataType) arguments[e];
ptr->setDArguments(dtypes);
}
void deleteGraphContext(sd::graph::Context* ptr) {
delete ptr;
}
sd::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) {
try {
return new sd::graph::RandomGenerator(rootSeed, nodeSeed);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getRandomGeneratorRootState(sd::graph::RandomGenerator* ptr) {
return ptr->rootState();
}
Nd4jLong getRandomGeneratorNodeState(sd::graph::RandomGenerator* ptr) {
return ptr->nodeState();
}
void setRandomGeneratorStates(sd::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) {
ptr->setStates(rootSeed, nodeSeed);
}
float getRandomGeneratorRelativeFloat(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeT<float>(index);
}
double getRandomGeneratorRelativeDouble(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeT<double>(index);
}
int getRandomGeneratorRelativeInt(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeInt(index);
}
Nd4jLong getRandomGeneratorRelativeLong(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeLong(index);
}
void deleteRandomGenerator(sd::graph::RandomGenerator* ptr) {
delete ptr;
}
Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) {
try {
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
unsigned int shapeSize = arr.shape.size();
std::vector<Nd4jLong> shape(shapeSize);
bool _empty = false;
for (unsigned int i = 0; i < shapeSize; i++) {
shape[i] = arr.shape[i];
if (arr.shape[i] == 0)
_empty = true;
}
auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray));
Nd4jLong *shapeBuffer;
if (shape.size() == 1 && shape[0] == 0) {
// scalar case
shapeBuffer = sd::ShapeBuilders::createScalarShapeInfo(dtype);
} else if (_empty) {
if (shapeSize > 0)
shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
else
shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype);
} else {
shapeBuffer = sd::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
}
return (Nd4jPointer)(sd::ConstantShapeHelper::getInstance().createFromExisting(shapeBuffer, true)); // TODO: this can lead to a crash in some cases
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
const char* runLightBenchmarkSuit(bool printOut) {
try {
sd::LightBenchmarkSuit suit;
auto result = suit.runSuit();
if (printOut)
nd4j_printf("%s\n", result.data());
auto chars = new char[result.length() + 1];
std::memcpy(chars, result.data(), result.length());
chars[result.length()] = (char) 0x0;
return chars;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
const char* runFullBenchmarkSuit(bool printOut) {
try {
sd::FullBenchmarkSuit suit;
auto result = suit.runSuit();
if (printOut)
nd4j_printf("%s\n", result.data());
auto chars = new char[result.length() + 1];
std::memcpy(chars, result.data(), result.length());
chars[result.length()] = (char) 0x0;
return chars;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getCachedMemory(int deviceId) {
return sd::ConstantHelper::getInstance().getCachedAmount(deviceId);
}
sd::LaunchContext* defaultLaunchContext() {
return LaunchContext::defaultContext();
}
Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) {
return lc->getScalarPointer();
}
Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) {
return lc->getReductionPointer();
}
Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) {
return lc->getAllocationPointer();
}
Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) {
return lc->getCudaStream();
}
Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) {
return lc->getCudaSpecialStream();
}
Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) {
return lc->getCublasHandle();
}
Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) {
return lc->getCusolverHandle();
}
int lastErrorCode() {
return sd::LaunchContext::defaultContext()->errorReference()->errorCode();
}
const char* lastErrorMessage() {
return sd::LaunchContext::defaultContext()->errorReference()->errorMessage();
}
void ctxShapeFunctionOverride(OpaqueContext* ptr, bool reallyOverride) {
ptr->setShapeFunctionOverride(reallyOverride);
}
void ctxPurge(OpaqueContext* ptr) {
ptr->clearFastPath();
}
int binaryLevel() {
return 0;
}
int optimalLevel() {
return 0;
}
bool isMinimalRequirementsMet() {
return true;
}
bool isOptimalRequirementsMet() {
return true;
}
void ctxAllowHelpers(OpaqueContext* ptr, bool reallyAllow) {
ptr->allowHelpers(reallyAllow);
}
void ctxSetExecutionMode(OpaqueContext* ptr, int execMode) {
if (execMode < 0 || execMode > 2)
execMode = 0;
ptr->setExecutionMode((samediff::ExecutionMode) execMode);
}
OpaqueDataBuffer* dbCreateExternalDataBuffer(Nd4jLong elements, int dataType, Nd4jPointer primary, Nd4jPointer special) {
auto buffer = dbAllocateDataBuffer(0, dataType, false);
if (primary != nullptr)
buffer->setPrimary(primary, elements);
if (special != nullptr)
buffer->setSpecial(special, elements);
return buffer;
}
OpaqueDataBuffer* dbAllocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) {
return allocateDataBuffer(elements, dataType, allocateBoth);
}
OpaqueDataBuffer* allocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) {
try {
auto dtype = DataTypeUtils::fromInt(dataType);
return new sd::InteropDataBuffer(elements * DataTypeUtils::sizeOf(dtype), dtype, allocateBoth);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jPointer dbPrimaryBuffer(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->primary();
}
Nd4jPointer dbSpecialBuffer(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->special();
}
void deleteDataBuffer(OpaqueDataBuffer *dataBuffer) {
delete dataBuffer;
}
void dbSetPrimaryBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer primaryBuffer, Nd4jLong numBytes) {
dataBuffer->setPrimary(primaryBuffer, numBytes);
}
void dbSetSpecialBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer specialBuffer, Nd4jLong numBytes) {
dataBuffer->setSpecial(specialBuffer, numBytes);
}
void dbAllocatePrimaryBuffer(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->allocatePrimary();
}
void dbAllocateSpecialBuffer(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->allocateSpecial();
}
void dbExpandBuffer(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) {
try {
dataBuffer->dataBuffer()->expand(elements * DataTypeUtils::sizeOf(dataBuffer->dataBuffer()->getDataType()));
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
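// Creates a view that shares the parent buffer's storage over the given
// length/offset window; no data is copied.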
OpaqueDataBuffer* dbCreateView(OpaqueDataBuffer *dataBuffer, Nd4jLong length, Nd4jLong offset) {
return new InteropDataBuffer(*dataBuffer, length, offset);
}
void dbSyncToSpecial(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->syncToSpecial();
}
void dbSyncToPrimary(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->syncToPrimary(nullptr);
}
void dbTickHostRead(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->readPrimary();
}
void dbTickHostWrite(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->writePrimary();
}
void dbTickDeviceRead(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->readSpecial();
}
void dbTickDeviceWrite(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->writeSpecial();
}
void dbExpand(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) {
dataBuffer->expand(elements);
}
void dbClose(OpaqueDataBuffer *dataBuffer) {
dataBuffer->getDataBuffer()->close();
}
int dbDeviceId(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->deviceId();
}
void dbSetDeviceId(OpaqueDataBuffer *dataBuffer, int deviceId) {
dataBuffer->setDeviceId(deviceId);
}
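// Reports data locality: 0 when host and device copies are both current,
// -1 when only the host copy is current, 1 otherwise.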
int dbLocality(OpaqueDataBuffer *dataBuffer) {
auto p = dataBuffer->dataBuffer()->isPrimaryActual();
auto d = dataBuffer->dataBuffer()->isSpecialActual();
if (p && d)
return 0;
else if (p)
return -1;
else
return 1;
} |
ae5278e0eabfffe9b8a43f93a28a513e004a903f.hip | // !!! This is a file automatically generated by hipify!!!
#include "vdsr.cuh"
VDSR::VDSR(cudnnHandle_t cudnnHandle, int batch, int inHeight, int inWidth, int inChannel, int layers)
{
int i;
this->batch = batch;
this->inHeight = inHeight;
this->inWidth = inWidth;
this->inChannel = inChannel;
this->layers = layers;
I1.build(batch, inChannel, inHeight, inWidth);
C_in.build(cudnnHandle, I1.xDesc, batch, I1.height, I1.width, I1.channel, 64, 3, 1);
C_layers = new CovLayer[layers];
C_layers[0].build(cudnnHandle, C_in.yDesc, batch, inHeight, inWidth, 64, 64, 3, 1);
for(i=1;i<layers;i++)
C_layers[i].build(cudnnHandle, C_layers[i-1].yDesc, batch, inHeight, inWidth, 64, 64, 3, 1);
C_out.build(cudnnHandle, C_layers[layers-1].yDesc, batch, inHeight, inWidth, 64, 1, 3, 1);
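// All convolutions share one cuDNN workspace, sized to the largest single-layer requirement.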
workspaceSize = C_in.workspaceSize;
for(i=0;i<layers;i++)
workspaceSize = workspaceSize > C_layers[i].workspaceSize ? workspaceSize : C_layers[i].workspaceSize;
workspaceSize = workspaceSize > C_out.workspaceSize ? workspaceSize : C_out.workspaceSize;
hipMalloc(&workspace, workspaceSize);
}
int VDSR::load_data(xwtype*input)
{
hipDeviceSynchronize();
I1.load(input);
return 0;
}
#ifdef INT8x4_EXT_CONFIG
int VDSR::load_para(const char*fn)
{
int i;
FILE *fp;
if (fopen_s(&fp, fn, "rb"))
{
printf("open file %s failed\n", fn);
exit(1);
}
C_in.load_para(fp);
for (i = 0;i < layers;i++)
C_layers[i].load_para(fp);
C_out.load_para(fp);
fseek(fp, -4, SEEK_END); // the last 4 bytes of the model file hold the float output ratio
fread(&ratio_out, sizeof(float), 1, fp);
fclose(fp);
return 0;
}
int VDSR::forward(cudnnHandle_t cudnnHandle)
{
int i;
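// INT8x4 path: each layer's output is re-quantized after activation before feeding the next convolution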
C_in.ConvForward(cudnnHandle, I1.xDesc, I1.x, workspace);
C_in.activate(cudnnHandle);
C_in.quantize_out();
//C_in.viewmem((xwtype*)I1.x);
C_layers[0].ConvForward(cudnnHandle, C_in.yDesc, C_in.y, workspace);
C_layers[0].activate(cudnnHandle);
C_layers[0].quantize_out();
//C_layers[0].viewmem((xwtype*)C_in.y);
for (i = 1;i < layers;i++)
{
C_layers[i].ConvForward(cudnnHandle, C_layers[i - 1].yDesc, C_layers[i - 1].y, workspace);
C_layers[i].activate(cudnnHandle);
C_layers[i].quantize_out();
//C_layers[i].viewmem((xwtype*)C_layers[i - 1].y);
}
C_out.ConvForward(cudnnHandle, C_layers[layers - 1].yDesc, C_layers[layers - 1].y, workspace);
//C_out.viewmem((xwtype*)C_layers[layers - 1].y);
return 0;
}
#elif defined FLOAT_CONFIG
int VDSR::load_para(const char*fn)
{
int i;
FILE *fp;
if (fopen_s(&fp, fn, "rb"))
{
printf("open file %s failed\n", fn);
exit(1);
}
C_in.load_para(fp);
for (i = 0;i < layers;i++)
C_layers[i].load_para(fp);
C_out.load_para(fp);
fclose(fp);
return 0;
}
int VDSR::forward(cudnnHandle_t cudnnHandle)
{
int i;
C_in.ConvForward(cudnnHandle, I1.xDesc, I1.x, workspace);
C_in.activate(cudnnHandle);
//C_in.viewmem((xwtype*)I1.x);
C_layers[0].ConvForward(cudnnHandle, C_in.yDesc, C_in.y, workspace);
C_layers[0].activate(cudnnHandle);
//C_layers[0].viewmem((xwtype*)C_in.y);
for (i = 1;i < layers;i++)
{
C_layers[i].ConvForward(cudnnHandle, C_layers[i - 1].yDesc, C_layers[i - 1].y, workspace);
C_layers[i].activate(cudnnHandle);
//C_layers[i].viewmem((xwtype*)C_layers[i - 1].y);
}
C_out.ConvForward(cudnnHandle, C_layers[layers - 1].yDesc, C_layers[layers - 1].y, workspace);
//C_out.viewmem((xwtype*)C_layers[layers - 1].y);
return 0;
}
int VDSR::quantizeNsave(const char*STEP_FILE, const char*BLU_FILE, const char*QUANT_MODEL)
{
int i;
FILE *f_step, *f_blu, *f_quant;
float temp;
if (fopen_s(&f_step, STEP_FILE, "rb"))
{
printf("open file %s failed\n", STEP_FILE);
exit(1);
}
if (fopen_s(&f_blu, BLU_FILE, "rb"))
{
printf("open file %s failed\n", BLU_FILE);
exit(1);
}
if (fopen_s(&f_quant, QUANT_MODEL, "wb"))
{
printf("open file %s failed\n", QUANT_MODEL);
exit(1);
}
C_in.quantizeNsave(f_step, f_blu, f_quant);
for (i = 0;i < layers;i++)
C_layers[i].quantizeNsave(f_step, f_blu, f_quant);
C_out.quantizeNsave(f_step, f_blu, f_quant);
fread(&temp, sizeof(float), 1, f_blu);//ratio_out
fwrite(&temp, sizeof(float), 1, f_quant);//ratio_out
fclose(f_step);
fclose(f_blu);
fclose(f_quant);
return 0;
}
#endif
VDSR::~VDSR(void)
{
delete[] C_layers;
hipFree(workspace);
return;
}
| ae5278e0eabfffe9b8a43f93a28a513e004a903f.cu | #include "vdsr.cuh"
VDSR::VDSR(cudnnHandle_t cudnnHandle, int batch, int inHeight, int inWidth, int inChannel, int layers)
{
int i;
this->batch = batch;
this->inHeight = inHeight;
this->inWidth = inWidth;
this->inChannel = inChannel;
this->layers = layers;
I1.build(batch, inChannel, inHeight, inWidth);
C_in.build(cudnnHandle, I1.xDesc, batch, I1.height, I1.width, I1.channel, 64, 3, 1);
C_layers = new CovLayer[layers];
C_layers[0].build(cudnnHandle, C_in.yDesc, batch, inHeight, inWidth, 64, 64, 3, 1);
for(i=1;i<layers;i++)
C_layers[i].build(cudnnHandle, C_layers[i-1].yDesc, batch, inHeight, inWidth, 64, 64, 3, 1);
C_out.build(cudnnHandle, C_layers[layers-1].yDesc, batch, inHeight, inWidth, 64, 1, 3, 1);
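// All convolutions share one cuDNN workspace, sized to the largest single-layer requirement.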
workspaceSize = C_in.workspaceSize;
for(i=0;i<layers;i++)
workspaceSize = workspaceSize > C_layers[i].workspaceSize ? workspaceSize : C_layers[i].workspaceSize;
workspaceSize = workspaceSize > C_out.workspaceSize ? workspaceSize : C_out.workspaceSize;
cudaMalloc(&workspace, workspaceSize);
}
int VDSR::load_data(xwtype*input)
{
cudaDeviceSynchronize();
I1.load(input);
return 0;
}
#ifdef INT8x4_EXT_CONFIG
int VDSR::load_para(const char*fn)
{
int i;
FILE *fp;
if (fopen_s(&fp, fn, "rb"))
{
printf("open file %s failed\n", fn);
exit(1);
}
C_in.load_para(fp);
for (i = 0;i < layers;i++)
C_layers[i].load_para(fp);
C_out.load_para(fp);
fseek(fp, -4, SEEK_END); // the last 4 bytes of the model file hold the float output ratio
fread(&ratio_out, sizeof(float), 1, fp);
fclose(fp);
return 0;
}
int VDSR::forward(cudnnHandle_t cudnnHandle)
{
int i;
C_in.ConvForward(cudnnHandle, I1.xDesc, I1.x, workspace);
C_in.activate(cudnnHandle);
C_in.quantize_out();
//C_in.viewmem((xwtype*)I1.x);
C_layers[0].ConvForward(cudnnHandle, C_in.yDesc, C_in.y, workspace);
C_layers[0].activate(cudnnHandle);
C_layers[0].quantize_out();
//C_layers[0].viewmem((xwtype*)C_in.y);
for (i = 1;i < layers;i++)
{
C_layers[i].ConvForward(cudnnHandle, C_layers[i - 1].yDesc, C_layers[i - 1].y, workspace);
C_layers[i].activate(cudnnHandle);
C_layers[i].quantize_out();
//C_layers[i].viewmem((xwtype*)C_layers[i - 1].y);
}
C_out.ConvForward(cudnnHandle, C_layers[layers - 1].yDesc, C_layers[layers - 1].y, workspace);
//C_out.viewmem((xwtype*)C_layers[layers - 1].y);
return 0;
}
#elif defined FLOAT_CONFIG
int VDSR::load_para(const char*fn)
{
int i;
FILE *fp;
if (fopen_s(&fp, fn, "rb"))
{
printf("open file %s failed\n", fn);
exit(1);
}
C_in.load_para(fp);
for (i = 0;i < layers;i++)
C_layers[i].load_para(fp);
C_out.load_para(fp);
fclose(fp);
return 0;
}
int VDSR::forward(cudnnHandle_t cudnnHandle)
{
int i;
C_in.ConvForward(cudnnHandle, I1.xDesc, I1.x, workspace);
C_in.activate(cudnnHandle);
//C_in.viewmem((xwtype*)I1.x);
C_layers[0].ConvForward(cudnnHandle, C_in.yDesc, C_in.y, workspace);
C_layers[0].activate(cudnnHandle);
//C_layers[0].viewmem((xwtype*)C_in.y);
for (i = 1;i < layers;i++)
{
C_layers[i].ConvForward(cudnnHandle, C_layers[i - 1].yDesc, C_layers[i - 1].y, workspace);
C_layers[i].activate(cudnnHandle);
//C_layers[i].viewmem((xwtype*)C_layers[i - 1].y);
}
C_out.ConvForward(cudnnHandle, C_layers[layers - 1].yDesc, C_layers[layers - 1].y, workspace);
//C_out.viewmem((xwtype*)C_layers[layers - 1].y);
return 0;
}
int VDSR::quantizeNsave(const char*STEP_FILE, const char*BLU_FILE, const char*QUANT_MODEL)
{
int i;
FILE *f_step, *f_blu, *f_quant;
float temp;
if (fopen_s(&f_step, STEP_FILE, "rb"))
{
printf("open file %s failed\n", STEP_FILE);
exit(1);
}
if (fopen_s(&f_blu, BLU_FILE, "rb"))
{
printf("open file %s failed\n", BLU_FILE);
exit(1);
}
if (fopen_s(&f_quant, QUANT_MODEL, "wb"))
{
printf("open file %s failed\n", QUANT_MODEL);
exit(1);
}
C_in.quantizeNsave(f_step, f_blu, f_quant);
for (i = 0;i < layers;i++)
C_layers[i].quantizeNsave(f_step, f_blu, f_quant);
C_out.quantizeNsave(f_step, f_blu, f_quant);
fread(&temp, sizeof(float), 1, f_blu);//ratio_out
fwrite(&temp, sizeof(float), 1, f_quant);//ratio_out
fclose(f_step);
fclose(f_blu);
fclose(f_quant);
return 0;
}
#endif
VDSR::~VDSR(void)
{
delete[] C_layers;
cudaFree(workspace);
return;
}
|
4d8346ede29308dd464a384ba89d6a2e5a90121b.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void vector_add(int *a, int *b, int *c, int n) {
int thread_id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (thread_id < n) {
c[thread_id] = a[thread_id] + b[thread_id];
}
}
void init_vector(int *a, int *b, int n) {
for (int i = 0; i < n; ++i) {
a[i] = rand() % 100;
b[i] = rand() % 100;
}
}
void check_answer(int *a, int *b, int *c, int n) {
for (int i = 0; i < n; ++i) {
assert(c[i] == a[i] + b[i]);
}
}
int main() {
// Initial values
int id;
hipGetDevice(&id); // Get the device ID for the prefetch hints below
int n = 1 << 16; // Number of elements per array
size_t bytes = sizeof(int) * n; // Size of each arrays in bytes
int *a, *b, *c; // Unified memory pointers
// No separate host allocation is needed: the managed allocations below are
// accessible from both host and device
// Allocate memory for these pointers
hipMallocManaged(&a, bytes);
hipMallocManaged(&b, bytes);
hipMallocManaged(&c, bytes);
// Initialize vectors
init_vector(a, b, n);
// Set up threads
int BLOCK_SIZE = 256; // Set threadblock size
int GRID_SIZE = (int)ceil((float)n / BLOCK_SIZE); // Set grid size, rounding up so every element is covered
// Call CUDA kernel
// Pre-fetch 'a' and 'b' to the device before the kernel launch
hipMemPrefetchAsync(a, bytes, id);
hipMemPrefetchAsync(b, bytes, id);
hipLaunchKernelGGL(( vector_add), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, a, b, c, n);
// Wait for all previous operations before using values
hipDeviceSynchronize();
// Pre-fetch 'c' back to the host before checking the result
hipMemPrefetchAsync(c, bytes, hipCpuDeviceId);
// Check result
check_answer(a, b, c, n);
} | 4d8346ede29308dd464a384ba89d6a2e5a90121b.cu | #include <assert.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void vector_add(int *a, int *b, int *c, int n) {
int thread_id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (thread_id < n) {
c[thread_id] = a[thread_id] + b[thread_id];
}
}
void init_vector(int *a, int *b, int n) {
for (int i = 0; i < n; ++i) {
a[i] = rand() % 100;
b[i] = rand() % 100;
}
}
void check_answer(int *a, int *b, int *c, int n) {
for (int i = 0; i < n; ++i) {
assert(c[i] == a[i] + b[i]);
}
}
int main() {
// Initial values
int id;
cudaGetDevice(&id); // Get the device ID for the prefetch hints below
int n = 1 << 16; // Number of elements per array
size_t bytes = sizeof(int) * n; // Size of each arrays in bytes
int *a, *b, *c; // Unified memory pointers
// No separate host allocation is needed: the managed allocations below are
// accessible from both host and device
// Allocate memory for these pointers
cudaMallocManaged(&a, bytes);
cudaMallocManaged(&b, bytes);
cudaMallocManaged(&c, bytes);
// Initialize vectors
init_vector(a, b, n);
// Set up threads
int BLOCK_SIZE = 256; // Set threadblock size
int GRID_SIZE = (int)ceil((float)n / BLOCK_SIZE); // Set grid size, rounding up so every element is covered
// Call CUDA kernel
// Pre-fetch 'a' and 'b' to the device before the kernel launch
cudaMemPrefetchAsync(a, bytes, id);
cudaMemPrefetchAsync(b, bytes, id);
vector_add<<<GRID_SIZE, BLOCK_SIZE>>>(a, b, c, n);
// Wait for all previous operations before using values
cudaDeviceSynchronize();
// Pre-fetch 'c' back to the host before checking the result
cudaMemPrefetchAsync(c, bytes, cudaCpuDeviceId);
// Check result
check_answer(a, b, c, n);
} |
7180ea3f9a77744a9c4a66fcbb2b958e03b46486.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void one_channel_mul_kernel(float *data_l, float *data_r, float *result)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
int one_ch_index = 2 * ((threadIdx.y * blockDim.x) + threadIdx.x);
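// Interleaved complex layout [re, im]: (a + bi)(c + di) = (ac - bd) + (ad + bc)i,
// with data_r holding a single channel that is reused for every channel of data_l.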
result[threadId] = data_l[threadId] * data_r[one_ch_index] - data_l[threadId + 1] * data_r[one_ch_index + 1];
result[threadId + 1] = data_l[threadId] * data_r[one_ch_index + 1] + data_l[threadId + 1] * data_r[one_ch_index];
} | 7180ea3f9a77744a9c4a66fcbb2b958e03b46486.cu | #include "includes.h"
__global__ void one_channel_mul_kernel(float *data_l, float *data_r, float *result)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
int one_ch_index = 2 * ((threadIdx.y * blockDim.x) + threadIdx.x);
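// Interleaved complex layout [re, im]: (a + bi)(c + di) = (ac - bd) + (ad + bc)i,
// with data_r holding a single channel that is reused for every channel of data_l.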
result[threadId] = data_l[threadId] * data_r[one_ch_index] - data_l[threadId + 1] * data_r[one_ch_index + 1];
result[threadId + 1] = data_l[threadId] * data_r[one_ch_index + 1] + data_l[threadId + 1] * data_r[one_ch_index];
} |
0a815a4f88463ce9ec7e42db7d8e427401fe3f0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "star2d2r-512-10-512_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
double __reg_9_3;
double __reg_9_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC9(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
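// Generated temporal-blocking pipeline: __side0Len = 10 fused time steps, where each
// __CALCk stage keeps a 5-row register window (halo of 2 rows per side), and __c_sb is a
// double-buffered shared-memory row used for the horizontal neighbor accesses.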
if (__c1Id == 0)
{
__LOAD(__reg_9_0, 0);
__LOAD(__reg_9_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_9_0, __reg_9_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_9_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_9_0, __reg_9_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_9_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_9_0, __reg_9_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_9_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_9_0, __reg_9_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_9_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_9_0, __reg_9_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_9_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_9_0, __reg_9_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_9_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_9_0, __reg_9_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_9_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_9_0, __reg_9_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_9_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_9_0, __reg_9_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_9_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(2, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(4, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(5, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(6, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(7, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(9, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(10, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(11, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(12, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(14, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(15, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(16, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 37);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(17, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 38);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 39);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(19, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 40);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
}
else
{
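// Warm-up for tiles that do not touch the top boundary: rows 0..40 are
// streamed in while the register pipeline fills stage by stage (CALC1
// starts after 5 loads, CALC2 after 9, ..., CALC9 after 37), and only the
// first fully valid output row (20) is stored at the end of the ramp-up.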
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 37);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 38);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 39);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 40);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
}
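// Point the shared-memory window back at the first half of the double
// buffer before entering the steady-state loop.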
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
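// Last tile along dimension 1: steady state. Each iteration streams in 5
// new rows and emits 5 output rows, each trailing its input row by 20; the
// loop stops 5 rows short of the clamped extent so the remainder cases
// below can drain the pipeline.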
for (__h = 41; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
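// Drain the pipeline for the rows left over after the 5-way unrolled loop;
// each branch below handles one possible remainder. The leading `if (0) {}`
// is dead code that keeps the remainder cases a uniform else-if ladder.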
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
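// Remainder 0: nothing more to load. The outstanding stages are
// re-evaluated with the last loaded registers (__reg_0_4, __reg_0_0)
// presumably standing in for rows past the clamped bottom boundary, and
// the final 18 output rows (__h - 20 .. __h - 3) are flushed.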
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_0_4);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
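// Remainder 1: one more row is loaded, then the flush proceeds as above,
// emitting rows __h - 20 .. __h - 2.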
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_0_0);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
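// Remainder 2: two more rows are loaded before flushing rows
// __h - 20 .. __h - 1.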
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_0_1);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
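// Remainder 3: three more rows are loaded before the pipeline flush.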
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_0_2);
__STORE(__h + 0, __reg_9_4, __reg_9_0, __reg_9_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
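// Epilogue: four input rows remain. Same drain pattern as above, with
// __reg_0_3 / __reg_0_4 serving as the fixed bottom-boundary rows and the
// final stores reaching rows __h and __h+1.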
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h + 0, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_0_3);
__STORE(__h + 1, __reg_9_0, __reg_9_1, __reg_9_2, __reg_0_3, __reg_0_4);
}
}
else
{
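// Interior blocks: steady-state streaming, unrolled 5x so the rotating
// register names line up across iterations. Each iteration loads one new
// row, advances all nine in-register stages, and stores the row 20 rows
// behind the load front (halo 2 x 10 fused time steps in this variant).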
for (__h = 41; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
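// Remainder: up to four rows may be left after the unrolled loop; process
// them one at a time, returning once the block's overlapped row range is
// exhausted.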
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
}
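/* kernel0_9: 9 time steps of the radius-2 star stencil fused per launch
 * (AN5D-style temporal blocking, cf. AN5D_TYPE below). Thread blocks tile
 * the c2 axis and stream along c1; each thread keeps a 9-stage register
 * pipeline and exchanges lateral (c2) neighbours through shared memory. */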
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
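// Iteration-space geometry: c0 is time, c1/c2 are the spatial axes, each
// with a halo of 2. With __side0Len = 9 fused steps, every block must be
// overlapped by __OlLen = __halo * 9 = 18 rows/columns on each side.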
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
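// Register pipeline: stage k (k = 0..8) keeps five consecutive rows of the
// k-times-updated plane in __reg_k_0 .. __reg_k_4. The register *names*
// rotate instead of the values, which is why the macro call sites cycle
// through all five argument orders.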
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
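// Shared-memory row buffer, double-buffered (__DB_SWITCH flips halves) so
// each stage needs only the single __syncthreads() in __CALCSETUP: the
// next stage writes to the other half while neighbours may still read this
// one.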
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
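// Validity predicates: loads are clamped to the padded c2 range, and stage
// k may only produce columns at least k * __halo2 from the block edge,
// since lateral halo columns are contaminated once per stage. Outside its
// window, __CALCk just forwards the centre register.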
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
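// __LOAD reads the (c0 % 2) time plane of A and __DEST addresses the
// opposite plane, so global memory is double-buffered in time. __CALCEXPR
// is the radius-2 star stencil: rows -2..+2 come from the five register
// arguments, columns -2..+2 of the centre row come from shared memory.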
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
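// First block along c1: rows 0 and 1 are the fixed top-boundary rows,
// parked in __reg_8_0 / __reg_8_1 (stage-8 registers are unused this
// early) and fed unchanged into every stage, so boundary rows are never
// updated. The pipeline then fills row by row until the first store at
// row 2.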
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_8_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_8_0, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_8_0, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_8_0, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_8_0, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_8_0, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_8_0, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_8_0, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(5, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(9, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(10, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(12, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(13, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(14, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(15, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(17, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
else
{
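// Interior and lower blocks: warm up by loading the full 37-row overlap
// (rows 0..36) without storing; the first valid store lands at row 18
// (= __OlLen1), the first row for which all nine stages saw real data.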
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
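  // Warm-up done: advance __c_sb to the second shared-memory buffer before
  // the streaming phase. The last tile along dim 1 (branch below) must also
  // drain the pipeline against the lower boundary; interior tiles take the
  // plain steady-state path in the else branch further down.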
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
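      // Steady state: each iteration streams five new rows through the
      // nine-stage pipeline (one row per register-rotation phase), stores one
      // finished row 18 rows behind the load front, then flips the
      // shared-memory buffer.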
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
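    // Drain: depending on how many rows remain past the last full iteration
    // (0 to 4), finish the rows still in flight. Rows beyond the tile edge
    // are substituted with the already-loaded boundary registers
    // (__reg_0_*), so each stage degenerates to a pass-through at the lower
    // halo.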
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h + 1, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
}
}
else
{
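    // Interior tiles along dim 1: the rows below the tile belong to the
    // neighbouring tile, so no boundary drain is needed; stream until the
    // overlapped extent is exhausted, then finish the 0 to 4 leftover rows
    // in the tail below.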
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
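    // Tail: up to four rows of the overlapped extent may remain; process
    // them one at a time, returning as soon as __side1LenOl is reached.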
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
}
}
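// kernel0_8: same AN5D-style register-pipelined stencil as the nine-stage
// kernel above, but fusing 8 time steps per launch (__side0Len = 8), i.e.
// seven __CALCk stages plus the final __STORE. A plausible host-side launch,
// assuming the usual one-block-per-tile convention (the host-side names
// below are hypothetical, recomputed from the same constants; they are not
// part of this file):
//
//   dim3 grid(__side1Num * __side2Num);   // one block per (dim1, dim2) tile
//   dim3 block(__side2LenOl);             // one thread per column
//   hipLaunchKernelGGL(kernel0_8, grid, block, 0, 0,
//                      A, dimsize, timestep, c0);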
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
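    // Tile configuration: 8 fused time steps over 512 x 480 tiles; each
    // fused step widens the required overlap by one halo (2), giving
    // __OlLen = 16 extra rows/columns on every side.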
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
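    // Thread mapping: blockIdx.x linearizes the (dim1-tile, dim2-tile)
    // pair; each thread owns one column __c2 of the overlapped dim-2
    // extent.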
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
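    // Register pipeline: five registers per stage hold a sliding window of
    // five consecutive rows. The eighth (final) stage writes straight to
    // memory via __STORE, so no __reg_8_* bank is declared in this kernel.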
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
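    // Double-buffered shared row: __DB_SWITCH flips __c_sb between the two
    // halves of __c_sb_double, so a stage can read its neighbours' values
    // from the previous phase while the current phase stages new ones.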
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
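    // Validity masks: stage k may only commit where the thread sits at
    // least k * __halo2 columns inside the overlapped extent, since each
    // stage consumes a 2-column halo from its predecessor; __storeValid
    // gates the final (eighth) stage.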
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
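    // __LOAD reads row h of the current time plane (the (__c0 % 2) buffer).
    // __CALCEXPR is the 9-point star stencil: rows -2/-1/+1/+2 come from
    // the register window, columns -2/-1/+1/+2 from the shared row.
    // __CALCk applies it under __writeValidk and otherwise passes the
    // centre value through; __STORE evaluates the final stage directly into
    // the ((__c0 + 1) % 2) plane.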
if (__c1Id == 0)
{
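      // Warm-up for the first tile along dim 1: rows 0 and 1 are the global
      // boundary and stay unmodified in __reg_7_0/__reg_7_1, which seed the
      // top of every stage while the pipeline is primed.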
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
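/* Pipeline full: once row 18 is loaded, row 2 is the first row whose
   stencil applications are all complete.  From here on, each __LOAD of
   row h retires output row h - 16. */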
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
else
{
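/* Interior or last tile: no top-boundary special case.  Rows are read
   from this tile's overlapped offset, and each deeper __CALC stage is
   engaged only once enough rows are resident to feed it. */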
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
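/* Pipeline primed: row 16 is the first row this tile emits; the rows
   above it are overlap (halo) rows that the neighboring tile recomputes,
   so __storeValid suppresses them here. */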
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
__c_sb = __c_sb_double + __blockSize * 0;
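/* Steady state.  The last tile along c1 must also flush the pipeline at
   the bottom boundary, so it gets its own copy of the streaming loop
   followed by a remainder chain; every other tile simply streams rows. */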
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
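/* Bottom of the last tile.  The `if (0) {} else if (...)` chain picks one
   of five cases, __h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2
   for k = 0..4, based on how many rows remain, then stops loading and
   flushes the partially filled stages, feeding the raw boundary rows
   (__reg_0_*) into the deeper __CALCs, where they pass through unchanged. */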
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
else
{
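/* Interior tile: stream five rows per iteration (one full register
   rotation, 5 = 2 * __halo1 + 1); each loaded row h retires output
   row h - 16. */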
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
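/* Drain: fewer than five rows left in the overlapped extent; emit them
   one at a time and return as soon as __h reaches __side1LenOl. */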
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
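// kernel0_7: fuses 7 consecutive time steps per launch (__side0Len = 7).
// Same register-pipeline structure as the surrounding kernel0_N variants;
// only the fusion degree and the derived tile/overlap sizes differ.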
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
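    // Pipeline fill for the first tile along c1: rows 0 and 1 are boundary
    // rows, so they are loaded straight into the final-stage registers
    // (__reg_6_*) and bypass the intermediate CALC stages.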
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
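      // Non-first tile along c1: every prologue row enters the stage-0
      // registers and passes through all seven compute stages; the halo
      // rows in the tile overlap make the stored results valid.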
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
__c_sb = __c_sb_double + __blockSize * 1;
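    // Steady-state phase: one new row in, one finished row out per step,
    // with the store lagging 14 rows (7 fused steps x halo width 2) behind
    // the load. The last tile along c1 must also drain the bottom edge.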
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
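      // Bottom-edge drain: depending on how many rows remain past the
      // unrolled loop (0-4 cases below), flush the partially filled
      // pipeline, reusing the last loaded rows past the tile edge.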
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
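      // Interior tiles need no edge drain: run the 5-way unrolled
      // steady-state loop over the full overlapped height.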
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
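      // Leftover rows after the 5-way unrolled loop: up to four of the
      // five guarded blocks execute; each returns once __h reaches
      // __side1LenOl.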
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
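// kernel0_6: the same pipeline specialized to 6 fused time steps
// (__side0Len = 6); by the same pattern the store lag becomes 12 rows.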
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
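  // Warm-up phase. The first branch handles the top tile (__c1Id == 0): the
  // two global halo rows are kept in __reg_5_0/__reg_5_1 and fed unmodified
  // into every later stage, since the stencil is not applied at the c1
  // boundary. The else branch handles interior tiles, which prime the
  // six-stage register pipeline from freshly loaded rows.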
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
__c_sb = __c_sb_double + __blockSize * 0;
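  // Steady state. The bottom tile (__c1Id == __side1Num - 1) runs a
  // shortened main loop and then drains the pipeline against the lower
  // boundary; all other tiles take the else branch further below. Each loop
  // iteration streams five rows through the pipeline, storing results
  // 12 rows (= __side0Len * __halo1) behind the load front.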
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
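    // Pipeline drain for the bottom tile: one case per possible number of
    // rows remaining (0..4). The last two loaded rows are the lower halo and
    // are fed unmodified into the remaining stages, mirroring the
    // top-boundary warm-up.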
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
else
{
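    // Interior tiles: a fully unrolled steady-state loop (five rows per
    // iteration) followed by a per-row tail that returns as soon as the
    // overlapped extent __side1LenOl is reached.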
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
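// kernel0_5: same streaming scheme as kernel0_6 but fusing __side0Len = 5
// time steps, so the pipeline has five stages, results trail the load front
// by 10 rows, and the c2 tile only shrinks to __side2Len = 492.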
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
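  // Warm-up for the five-stage pipeline; as in kernel0_6, the top tile keeps
  // the two boundary rows (here in __reg_4_0/__reg_4_1) and passes them
  // through unchanged, while interior tiles prime the stages from loaded rows.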
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
__c_sb = __c_sb_double + __blockSize * 1;
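  // The shared-memory pointer is reset to the second buffer here (offset
  // __blockSize * 1, versus * 0 in kernel0_6), presumably so the next
  // __DB_SWITCH toggles away from the bank written last during warm-up and
  // does not overwrite data other threads may still be reading.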
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
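      // Extra buffer switch: with a five-stage pipeline each iteration
      // performs an odd number (5 x 5 = 25) of __CALCSETUP toggles, so one
      // more switch restores double-buffer parity before the next iteration.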
__DB_SWITCH(); __syncthreads();
}
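    // Pipeline drain for the bottom tile, as in kernel0_6: one case per
    // number of rows remaining, with the two lower halo rows passed through
    // unchanged.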
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
else
{
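// Interior blocks need no boundary epilogue: the pipeline drains through the
// guarded tail steps below, returning as soon as all __side1LenOl rows are used.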
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
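// kernel0_4: the same 2D star stencil (halo 2 in each dimension) with 4 time
// steps fused per launch (__side0Len = 4), in the same register-pipelined,
// shared-memory double-buffered style as the kernel above. Each block covers a
// 512 x 496 output tile plus the 8-row/column per-side overlap consumed by the
// four fused steps. A minimal launch sketch, assuming the usual 1D grid used
// by this family of generated kernels (grid shape inferred from the index
// arithmetic below, not taken from a host file):
//
//   dim3 grid(((dimsize - 4 + 511) / 512) * ((dimsize - 4 + 495) / 496));
//   dim3 block(512);  // == __side2LenOl
//   hipLaunchKernelGGL(kernel0_4, grid, block, 0, 0, A, dimsize, timestep, c0);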
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
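// __writeValidK masks off the K * __halo2 columns on each side of the tile
// whose depth-K values would depend on data outside this block; only
// __storeValid (the deepest mask) gates writes back to global memory.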
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
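// Macro roles (shared by all kernels in this family): __LOAD pulls one input
// row into a register; each __CALCk applies the 9-point star -- rows
// c1-2..c1+2 from the five rotated registers, columns c2-2..c2+2 from the
// shared-memory row -- at temporal depth k, degrading to a pass-through copy
// (out = reg2) for threads outside its valid region (see __writeValidK above);
// __STORE evaluates the final depth and writes the output row. __CALCSETUP
// parks each stage's center row in shared memory so neighbouring threads can
// read the c2-direction neighbours.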
if (__c1Id == 0)
{
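// Topmost block: rows 0 and 1 are physical boundary values. They are loaded
// once into the final-stage registers (__reg_3_0/__reg_3_1) and reused as the
// two upper-neighbour inputs at every temporal depth, since boundary rows do
// not change between time steps; __STORE(2, ...) then writes the first
// interior row.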
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
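// Re-base the double buffer for the steady-state loop; the shorter prologue
// here leaves the opposite parity from the 5-step kernel above (half 0 vs 1).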
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
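// kernel0_3: 3 fused time steps (__side0Len = 3). Identical pipeline with one
// register stage fewer (__reg_0_* .. __reg_2_*), a 512 x 500 output tile, and
// a 6-row/column per-side overlap.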
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
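// As in kernel0_4, the two boundary rows are kept in the top-stage registers
// (here __reg_2_0/__reg_2_1) and reused at every depth.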
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
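// kernel0_2: 2 fused time steps (__side0Len = 2): a 512 x 504 tile with a
// 4-row/column per-side overlap. Presumably the host driver dispatches
// kernel0_K when exactly K time steps remain near the end of the time loop
// (an assumption about the generated host code, which is not shown here).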
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
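/*
 * Stencil plumbing, shared by every kernel in this file: __LOAD ping-pongs
 * between the two time planes packed into A (plane (__c0 % 2) on read,
 * ((c0 + 1) % 2) in __DEST on write). __CALCEXPR is the 9-point star: the
 * +/-1 and +/-2 vertical neighbours come from the register pipeline, the
 * horizontal ones from shared memory (__SBREF), where __CALCSETUP has just
 * published the centre row. __DB_SWITCH flips between the two halves of
 * __c_sb_double, presumably so a single __syncthreads() per stage suffices.
 * Threads outside a stage's shrinking valid region (__writeValidN) simply
 * forward the centre register unchanged.
 */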
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
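/* Drain for the last block in c1: the in-flight rows are flushed, and where
 the stencil would read rows past the loaded extent, the raw input registers
 (__reg_0_*) stand in -- consistent with halo rows keeping their input
 values across the fused steps. */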
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
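/*
 * kernel0_1: single-step variant (__side0Len = 1), presumably invoked by the
 * host for leftover time steps when timestep is not a multiple of the fusion
 * degree. With nothing to fuse there is no intermediate register plane, so
 * the __c1Id == 0 and interior branches reduce to identical code and
 * __STORE applies __CALCEXPR directly to the five in-flight input rows.
 */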
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
| 0a815a4f88463ce9ec7e42db7d8e427401fe3f0b.cu | #include "star2d2r-512-10-512_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
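/*
 * kernel0_10: the fully fused variant -- 10 time steps per sweep. Nine
 * intermediate register planes (__reg_1_* .. __reg_9_*) hold the rows at
 * time levels t+1 .. t+9 while __reg_0_* receives the freshly loaded input
 * row; each plane cycles through five registers because the radius-2
 * stencil needs five rows in flight. Stores trail the load front by
 * __side0Len * __halo1 = 20 rows, and the useful block width drops to
 * __side2Len = 512 - 2*(2*10) = 472 columns.
 */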
__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
double __reg_9_3;
double __reg_9_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC9(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
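/* Pipeline prime: rows 0..40 enter one at a time, and each __CALCk pushes a
 row one more fused step forward, so the first output row, __STORE(2, ...),
 becomes available only once row 2 + 2*__side0Len = 22 has been loaded. In
 the __c1Id == 0 branch the two top halo rows stay parked in
 __reg_9_0/__reg_9_1 and feed every stage as boundary values; interior
 blocks (the else branch) instead compute through their overlap region. */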
if (__c1Id == 0)
{
__LOAD(__reg_9_0, 0);
__LOAD(__reg_9_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_9_0, __reg_9_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_9_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_9_0, __reg_9_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_9_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_9_0, __reg_9_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_9_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_9_0, __reg_9_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_9_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_9_0, __reg_9_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_9_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_9_0, __reg_9_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_9_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_9_0, __reg_9_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_9_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_9_0, __reg_9_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_9_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_9_0, __reg_9_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_9_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(2, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(4, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(5, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(6, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(7, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(9, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(10, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(11, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(12, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(14, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(15, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(16, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 37);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(17, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 38);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 39);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(19, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 40);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 37);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 38);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 39);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 40);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
}
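// Point __c_sb back at the first half of the double-buffered shared line
// before entering the steady-state loop.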
__c_sb = __c_sb_double + __blockSize * 0;
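// Last tile along c1: the pipeline is drained against the array edge, so
// raw loads (__reg_0_*) stand in for stage outputs once each stage's
// five-row window runs past the final loaded row.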
if (__c1Id == __side1Num - 1)
{
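// Steady state for the boundary tile, unrolled five-way so the rotating
// register names return to their starting assignment each iteration; the
// store trails the load by 20 rows (nine __CALC stages plus the final
// __STORE, each consuming a five-row window).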
for (__h = 41; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
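// Remainder dispatch: exactly one branch below matches the number of rows
// (__h + 0 .. __h + 4) left before the tile's clamped end; `if (0) {}` is a
// generator idiom that lets every real case be a uniform `else if`.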
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_0_4);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_0_0);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_0_1);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_0_2);
__STORE(__h + 0, __reg_9_4, __reg_9_0, __reg_9_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h + 0, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_0_3);
__STORE(__h + 1, __reg_9_0, __reg_9_1, __reg_9_2, __reg_0_3, __reg_0_4);
}
}
else
{
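// Interior tiles: the overlapped halo rows guarantee every stencil window
// is full, so the steady loop needs no boundary substitution and the
// guarded steps below simply drain the last few rows.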
for (__h = 41; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
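// Epilogue: at most four rows of the overlapped extent remain; each guarded
// step bails out once __h reaches __side1LenOl.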
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
}
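/* kernel0_9: same auto-generated scheme as the kernels above (the AN5D_TYPE
 * naming suggests AN5D output), but fusing 9 time steps of the radius-2 star
 * stencil per sweep. Rows (c1) stream through per-thread registers; columns
 * (c2) map one-per-thread across the block, exchanging neighbour values via
 * a double-buffered shared-memory line. Each fused stage shrinks the
 * writable interior by the halo (2), hence the 2*9 = 18-cell overlap on
 * every tile edge.
 *
 * A minimal host-launch sketch, kept inside #if 0 because the real driver
 * lives elsewhere in this file; the wrapper name and `d_A` are illustrative
 * assumptions, while the tile constants are read off the kernel body below.
 */
#if 0
static void launch_kernel0_9(double *d_A, int dimsize, int timestep, int c0)
{
    const unsigned halo = 2, side0Len = 9;           // fused time steps
    const unsigned side1Len = 512, side2Len = 476;   // interior tile extents
    const unsigned c1Len = dimsize - 2 * halo;
    const unsigned c2Len = dimsize - 2 * halo;
    const unsigned side1Num = (c1Len + side1Len - 1) / side1Len;
    const unsigned side2Num = (c2Len + side2Len - 1) / side2Len;
    const unsigned side2LenOl = side2Len + 2 * halo * side0Len; // 476 + 36 = 512
    dim3 grid(side1Num * side2Num, 1, 1); // kernel decodes c1/c2 tile ids from blockIdx.x
    dim3 block(side2LenOl, 1, 1);         // one thread per overlapped c2 column
    kernel0_9<<<grid, block>>>(d_A, dimsize, timestep, c0);
}
#endif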
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
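// Thread/tile mapping: each thread owns one c2 column, including the 18-column
// overlap (__OlLen2) on each side of the tile; blockIdx.x is decomposed into a
// c1 tile id (__c1Id) and a c2 tile offset, so the grid is expected to hold
// __side1Num * __side2Num blocks in x.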
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
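// Nine pipeline stages, each holding a rotating 5-register window
// (__reg_k_0..__reg_k_4) of rows that have received k stencil applications.
// One row per stage is also staged in shared memory (double-buffered below)
// to serve the +/-2 column neighbors.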
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
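// Validity predicates: a column may be written at stage k only if it sits at
// least k*__halo2 columns inside the tile's overlap edge, so the usable region
// shrinks by 2 columns per fused step; only stage-9 results (__writeValid9)
// are stored back to global memory.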
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
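// Radius-2 star stencil: the four vertical neighbors (rows -2..-1, +1..+2)
// come from the register window (a, b, d, e), the four horizontal neighbors
// from the shared-memory row (__SBREF at +/-1, +/-2). Note the generated
// coefficients are float literals applied to double data, and the nine
// weights sum to approximately 1.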
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
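// __CALCk: stage k of the pipeline. Every thread first publishes its center
// value to shared memory and syncs (__CALCSETUP); threads whose column is
// valid at stage k apply the stencil, all others pass the unmodified center
// through so the register rotation stays consistent across the block.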
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
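// Prologue for the first c1 tile: rows 0 and 1 are the physical boundary and
// are fed unmodified into every stage (note __reg_8_0/__reg_8_1 reused as the
// top inputs of each __CALCk below) while the pipeline is primed row by row.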
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_8_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_8_0, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_8_0, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_8_0, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_8_0, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_8_0, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_8_0, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_8_0, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
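// Pipeline full: from here on, each newly loaded row yields one fully updated
// (9-step) output row, 18 rows behind the load front.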
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(5, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(9, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(10, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(12, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(13, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(14, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(15, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(17, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
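// Interior c1 tiles: no physical boundary here, so the pipeline is primed
// entirely from loaded rows 0..36 (the 18-row overlap plus the first interior
// rows) before the first store at row offset 18.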
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
__c_sb = __c_sb_double + __blockSize * 1;
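// Steady state (last c1 tile shown first): the assignment above re-bases the
// shared-memory pointer into the phase the loop expects. The loop is unrolled
// by 5 to match the register-rotation period; each body loads one row and
// stores one finished row 18 behind it, and the trailing __DB_SWITCH() +
// __syncthreads() restores double-buffer parity (each __CALCk/__STORE flips
// it once, 45 flips per iteration) before the next pass.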
if (__c1Id == __side1Num - 1)
{
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
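// Epilogue: drain the pipeline for the last rows of the tile. Each branch
// handles one possible remainder (__h + r reaching the tile end), reusing the
// final loaded rows (__reg_0_*) unmodified as bottom-boundary inputs to the
// remaining stages.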
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
}
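    // Bottom remainder: three input rows (__h .. __h+2) are left for this
    // block, so three more __LOADs feed the pipeline before it is flushed.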
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h + 1, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
}
}
else
{
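    // Steady state for interior blocks: each pass retires five rows, after
    // which the mod-5 register rotation repeats exactly.  Every __CALCn and
    // __STORE flips the shared-memory buffer once (45 flips per pass); the
    // trailing __DB_SWITCH makes the count even so the next pass starts on
    // the same buffer.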
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
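    // Drain: retire the rows left over after the unrolled loop one at a
    // time, returning as soon as __h reaches __side1LenOl.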
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
}
}
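// kernel0_8: the same 2D radius-2 star stencil, fusing 8 time steps per
// launch (__side0Len = 8): seven guarded __CALC stages feed the final
// __STORE, over a 512 x 480 spatial tile with a halo of 2 in each dimension.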
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
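  // Two shared-memory row buffers, flipped by __DB_SWITCH, hold the row
  // currently being combined so __SBREF can read the +/-1 and +/-2
  // horizontal neighbours.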
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
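  // __LOAD reads one input row into a register.  Each __CALCn applies one
  // fused stencil step when its __writeValidn predicate holds and otherwise
  // forwards the centre register unchanged, so lanes outside the shrinking
  // valid window carry data without corrupting it.  __STORE applies the
  // final step and writes to the other time-plane of A.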
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
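  // First block along c1: boundary rows 0 and 1 stay in __reg_7_0/__reg_7_1
  // and are fed unchanged into every stage, clamping the top halo while the
  // pipeline fills.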
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
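  // Interior blocks: warm the pipeline on the first 33 overlapped rows
  // before emitting the first store (row 16).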
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
__c_sb = __c_sb_double + __blockSize * 0;
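  // Last block along c1: advance in five-row passes until fewer than five
  // input rows remain, then take the matching bottom-boundary epilogue
  // below.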
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
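    // The generator chains one branch per possible remainder off this empty
    // if; each branch flushes the pipeline while clamping the bottom halo.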
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
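// Interior blocks along dim1: no bottom clamping is needed, so keep streaming
// until the overlapped tile ends.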
else
{
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
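// Tail: up to four rows may remain in the overlapped tile; emit them one at a
// time, checking the tile bound before each load.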
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
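// kernel0_7: 7 time steps of the order-2 star stencil fused into one kernel
// launch. Rows (dim1) are streamed through registers while dim2 neighbors come
// from a shared-memory line; each thread block covers a 512 x 484 output tile
// plus a 2*7-wide overlap on every side.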
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
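// Rotating register windows: stage k of the pipeline keeps five consecutive
// rows of its input in __reg_k_0..__reg_k_4 (stage 0 holds the raw loads).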
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
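// Two shared-memory lines (double buffer): each __CALC/__STORE flips the
// buffer and publishes its center row for the dim2 halo reads; the flip
// avoids a second barrier between reading one line and writing the next.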
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
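// Validity masks: every fused step shrinks the writable band by one halo
// width (2) on each side of the tile; __storeValid gates the 7th step.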
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
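// __LOAD streams row h of the current time plane; __CALCk applies one stencil
// step when stage k may write and otherwise passes the center value through;
// __STORE applies the final step and writes row h of the other time plane.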
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
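// Pipeline prologue. The first block along dim1 loads the two boundary rows
// into __reg_6_0/__reg_6_1 and feeds them to every stage as the clamped top
// halo; stores begin at row 2.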
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
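// Interior blocks prime every stage from the 2*halo overlap instead of the
// clamped boundary rows, so the first store lands at row 14.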
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
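    // Prologue complete: re-anchor the shared-memory double buffer to the
    // half the prologue finished on, so the steady-state loop's __DB_SWITCH
    // alternation stays consistent.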
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
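      // Pipeline drain for the last tile: each branch below handles a
      // different number of leftover rows (__h + k reaching the tile end),
      // flushing the in-flight register stages without loading past the
      // c1 boundary.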
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
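  // Interior tiles: stream the full overlapped extent; the unrolled tail
  // below retires the remaining rows one at a time, returning as soon as
  // __h reaches __side1LenOl.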
else
{
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
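    // Early-exit tail: each remaining row checks for the end of the
    // overlapped extent before loading, since interior tiles need no
    // boundary drain.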
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
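// ---------------------------------------------------------------------------
// kernel0_6: a six-deep variant of the streaming stencil kernel above (which
// pipelines seven stages and stores at __h - 14). Here six time steps are
// fused per sweep (__side0Len = 6), using __CALC1..__CALC5 plus the final
// __STORE, with output offset __h - 12.
//
// Below is a hedged host-side sketch of how a kernel of this shape could be
// launched; the grid/block geometry mirrors the constants inside the kernel
// body. The helper name `launch_kernel0_6_sketch` is hypothetical, and the
// scheduling of c0 time chunks is left to the real driver (not shown here).
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0);

static void launch_kernel0_6_sketch(double *d_A, int dimsize, int timestep, int c0)
{
    const unsigned halo = 2, side0 = 6, side1 = 512, side2 = 488;
    const unsigned c1Len = (unsigned)dimsize - 2 * halo;     // interior rows
    const unsigned c2Len = (unsigned)dimsize - 2 * halo;     // interior columns
    const unsigned side1Num = (c1Len + side1 - 1) / side1;   // tiles along c1
    const unsigned side2Num = (c2Len + side2 - 1) / side2;   // tiles along c2
    const unsigned side2LenOl = side2 + 2 * (halo * side0);  // 512 threads/block
    // One block per (c1 tile, c2 tile) pair, one thread per overlapped column.
    hipLaunchKernelGGL(kernel0_6, dim3(side1Num * side2Num), dim3(side2LenOl),
                       0, 0, d_A, dimsize, timestep, c0);
}
// ---------------------------------------------------------------------------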
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
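  // blockIdx.x encodes a (c1 tile, c2 tile) pair; __c2 is this thread's
  // global column index, including the overlapped halo columns on either
  // side of the tile.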
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
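  // Two shared-memory rows are double-buffered: each pipeline stage flips
  // halves before publishing its centre value, so one __syncthreads() per
  // stage suffices and no stage overwrites a row still being read.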
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
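  // Each __writeValidK ring shrinks the active columns by one halo width
  // per pipeline stage; only threads valid through all six stages commit
  // results to global memory.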
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
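  // Macro scheme: __LOAD pulls one input row into a register; __CALCk runs
  // pipeline stage k (first publishing the centre register to the shared
  // row so __CALCEXPR can read its c2 neighbours); __STORE runs the final
  // stage and writes the fully updated row to the output time plane.
  // Threads outside a stage's valid ring pass their centre value through
  // unchanged (out = reg2).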
if (__c1Id == 0)
{
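    // First tile along c1: rows 0 and 1 are the fixed top boundary. They are
    // held in __reg_5_0/__reg_5_1 and fed directly into every deeper stage,
    // so the first committed output is row 2 (= __halo1).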
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
else
{
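    // Interior tiles: all halo rows come from global memory, so stage 0 is
    // primed with five ordinary loads before the deeper stages start; the
    // prologue's first store lands at offset 12 (= __halo1 * __side0Len).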
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
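  // Prologue complete: re-anchor the shared-memory double buffer to the half
  // the prologue finished on before entering the streaming loop.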
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
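      // Pipeline drain for the last tile, as in the seven-stage kernel above:
      // each branch flushes the in-flight stages for a different number of
      // leftover rows.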
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
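// Tiles that do not touch the bottom boundary: steady-state loop, unrolled
// 5x to match the register-rotation period. Stores trail the load front by
// 12 rows (6 fused time steps x halo 2).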
else
{
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
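// Epilogue of the unrolled loop: process any leftover rows one at a time,
// returning as soon as __h reaches the overlapped tile height.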
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
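// kernel0_5: the same order-2 star stencil, fusing 5 time steps per kernel
// launch (__side0Len = 5). Each block computes a 512 x 492 output tile with
// one thread per column of the overlapped tile (512 columns); every thread
// keeps a 5-row register window per pipeline stage and exchanges horizontal
// neighbours through the double-buffered shared-memory row __c_sb. Final
// results are written 10 rows (5 steps x halo 2) behind the row being loaded.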
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
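// Helper macros: __LOAD reads one row of the current time buffer into a
// register; __CALCEXPR evaluates the 9-point star (vertical neighbours from
// registers, horizontal neighbours from the shared-memory row); __CALCk
// applies stage k of the fused-time pipeline, passing the centre value
// through unchanged where the stage is not valid for this thread; __STORE
// evaluates the final stage and writes into the destination time buffer.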
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
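// Prologue. The first tile along c1 owns the global top boundary: rows 0-1
// are halo rows that every stage must see unmodified, so they are loaded
// once and fed directly into each __CALCk while the pipeline fills.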
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
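// Interior tiles prime the pipeline from the overlap region instead; only
// row 10 is stored during priming.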
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
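// Pin the shared-memory double buffer to a known half before the main
// loops, making the buffer parity independent of which prologue path ran.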
__c_sb = __c_sb_double + __blockSize * 1;
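// The last tile along c1 owns the global bottom boundary: run the unrolled
// steady-state loop while at least five rows remain, then drain the
// pipeline against the boundary in the residue chain below.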
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
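// Residue chain: one case per possible remainder (0-4 rows left). Each case
// loads the remaining rows, substitutes the unmodified boundary rows into
// the outstanding stages, and flushes every in-flight row with __STORE.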
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
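// Tiles away from the bottom boundary: steady-state loop, unrolled 5x,
// with stores trailing the loads by 10 rows.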
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
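// Epilogue of the unrolled loop: finish the leftover rows one at a time,
// returning once the overlapped tile height is reached.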
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
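// kernel0_4: as above but fusing 4 time steps per launch (__side0Len = 4).
// The pipeline is one stage shorter (registers __reg_0_* .. __reg_3_*), the
// per-block output tile is 512 x 496, and stores trail the loads by 8 rows.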
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
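// Same helper macros as in kernel0_5, specialised for the 4-stage pipeline.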
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
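// Prologue, as in kernel0_5: the first tile feeds the top boundary rows 0-1
// straight into every stage while the pipeline fills; other tiles prime the
// pipeline from overlap rows and store only row 8.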
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
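// __CALCEXPR evaluates a 9-point star stencil of radius 2: the five vertical
// taps come from the register window (__a..__e), the four horizontal taps from
// the shared-memory row __c_sb at offsets -2, -1, +1, +2 around this thread's column.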
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
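// kernel0_3 fuses __side0Len = 3 stencil time steps per sweep: __CALC1 and
// __CALC2 keep the two intermediate stages in registers, and __STORE applies
// the third step while writing to the destination buffer.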
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
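// kernel0_2 fuses __side0Len = 2 time steps: one register stage (__CALC1)
// plus the final application inside __STORE.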
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
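// kernel0_1 performs a single time step (__side0Len = 1): loaded rows feed
// __STORE directly, with no intermediate __CALC stage.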
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
|
7a4d3a8cb0da49e6b47c876ad843fd621c12be0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
/* Acknowledgement: the following code is strongly inspired by
https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.cu
*/
#include "paddle/framework/op_registry.h"
#include "paddle/operators/cross_entropy_op.h"
#include "paddle/platform/assert.h"
#include "paddle/platform/hostdevice.h"
namespace paddle {
namespace operators {
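// Grid-stride loop: each thread starts at its global index and strides by the
// total number of launched threads, so any n is covered regardless of grid size.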
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename Dtype>
__device__ Dtype cuda_sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype cuda_tanh(const Dtype x) {
return Dtype(1 - exp(-2. * x)) / (Dtype(1) + exp(-2. * x));
}
template <typename T>
__global__ void LSTMUnitKernel(const int nthreads, const int dim,
const T* C_prev, const T* X, T* C, T* H,
const T forget_bias) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const T* X_offset = X + 4 * dim * n;
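// X packs the four pre-activation gates per sample contiguously as
// [i | f | o | g], each of width dim; the LSTM cell update below is
// c = f * c_prev + i * g and h = o * tanh(c).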
const T i = cuda_sigmoid(X_offset[d]);
const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias);
const T o = cuda_sigmoid(X_offset[2 * dim + d]);
const T g = cuda_tanh(X_offset[3 * dim + d]);
const T c_prev = C_prev[index];
const T c = f * c_prev + i * g;
C[index] = c;
const T tanh_c = cuda_tanh(c);
H[index] = o * tanh_c;
}
}
template <typename T>
__global__ void LSTMUnitGradientKernel(const int nthreads, const int dim,
const T* C_prev, const T* X, const T* C,
const T* H, const T* C_diff,
const T* H_diff, T* C_prev_diff,
T* X_diff, const T forget_bias) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const T* X_offset = X + 4 * dim * n;
T* c_prev_diff = C_prev_diff + index;
T* X_diff_offset = X_diff + 4 * dim * n;
T* i_diff = X_diff_offset + d;
T* f_diff = X_diff_offset + 1 * dim + d;
T* o_diff = X_diff_offset + 2 * dim + d;
T* g_diff = X_diff_offset + 3 * dim + d;
const T i = cuda_sigmoid(X_offset[d]);
const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias);
const T o = cuda_sigmoid(X_offset[2 * dim + d]);
const T g = cuda_tanh(X_offset[3 * dim + d]);
const T c_prev = C_prev[index];
const T c = C[index];
const T tanh_c = cuda_tanh(c);
const T c_term_diff =
C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c);
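// c_term_diff is dL/dc: the gradient reaching the cell state both directly
// (C_diff) and through h = o * tanh(c). The gate gradients below apply the
// derivatives sigmoid'(x) = s * (1 - s) and tanh'(x) = 1 - t * t.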
*c_prev_diff = c_term_diff * f;
*i_diff = c_term_diff * g * i * (1 - i);
*f_diff = c_term_diff * c_prev * f * (1 - f);
*o_diff = H_diff[index] * tanh_c * o * (1 - o);
*g_diff = c_term_diff * i * (1 - g * g);
}
}
template <typename T>
class LstmUnitOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto* x_tensor = ctx.Input<framework::Tensor>("X");
auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
auto* c_tensor = ctx.Output<framework::Tensor>("C");
auto* h_tensor = ctx.Output<framework::Tensor>("H");
auto forget_bias = static_cast<T>(ctx.Attr<float>("forget_bias"));
int b_size = c_tensor->dims()[0];
int D = c_tensor->dims()[1];
const T* X = x_tensor->data<T>();
const T* C_prev = c_prev_tensor->data<T>();
T* C = c_tensor->mutable_data<T>(ctx.GetPlace());
T* H = h_tensor->mutable_data<T>(ctx.GetPlace());
int block = 512;
int n = b_size * D;
int grid = (n + block - 1) / block;
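// One thread per (sample, dim) element; ceil-divide so the grid covers all n.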
hipLaunchKernelGGL(( LSTMUnitKernel<T>), dim3(grid), dim3(block), 0, 0, n, D, C_prev, X, C, H, forget_bias);
}
};
template <typename T>
class LstmUnitGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto x_tensor = ctx.Input<Tensor>("X");
auto c_prev_tensor = ctx.Input<Tensor>("C_prev");
auto c_tensor = ctx.Input<Tensor>("C");
auto h_tensor = ctx.Input<Tensor>("H");
auto hdiff_tensor = ctx.Input<Tensor>(framework::GradVarName("H"));
auto cdiff_tensor = ctx.Input<Tensor>(framework::GradVarName("C"));
auto xdiff_tensor = ctx.Output<Tensor>(framework::GradVarName("X"));
auto c_prev_diff_tensor =
ctx.Output<Tensor>(framework::GradVarName("C_prev"));
auto* X = x_tensor->data<T>();
auto* C_prev = c_prev_tensor->data<T>();
auto* C = c_tensor->data<T>();
auto* H = h_tensor->data<T>();
auto* H_diff = hdiff_tensor->data<T>();
auto* C_diff = cdiff_tensor->data<T>();
auto* C_prev_diff = c_prev_diff_tensor->mutable_data<T>(ctx.GetPlace());
auto* X_diff = xdiff_tensor->mutable_data<T>(ctx.GetPlace());
int N = c_tensor->dims()[0];
int D = c_tensor->dims()[1];
auto forget_bias = static_cast<T>(ctx.Attr<float>("forget_bias"));
int block = 512;
int n = N * D;
int grid = (n + block - 1) / block;
hipLaunchKernelGGL(( LSTMUnitGradientKernel<T>), dim3(grid), dim3(block), 0, 0, n, D, C_prev, X, C, H, C_diff,
H_diff, C_prev_diff, X_diff,
forget_bias);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(lstm_unit, ops::LstmUnitOpCUDAKernel<float>,
ops::LstmUnitOpCUDAKernel<double>);
REGISTER_OP_GPU_KERNEL(lstm_unit_grad, ops::LstmUnitGradOpCUDAKernel<float>,
ops::LstmUnitGradOpCUDAKernel<double>);
| 7a4d3a8cb0da49e6b47c876ad843fd621c12be0c.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
/* Acknowledgement: the following code is strongly inspired by
https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.cu
*/
#include "paddle/framework/op_registry.h"
#include "paddle/operators/cross_entropy_op.h"
#include "paddle/platform/assert.h"
#include "paddle/platform/hostdevice.h"
namespace paddle {
namespace operators {
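// Grid-stride loop: each thread starts at its global index and strides by the
// total number of launched threads, so any n is covered regardless of grid size.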
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename Dtype>
__device__ Dtype cuda_sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype cuda_tanh(const Dtype x) {
return Dtype(1 - exp(-2. * x)) / (Dtype(1) + exp(-2. * x));
}
template <typename T>
__global__ void LSTMUnitKernel(const int nthreads, const int dim,
const T* C_prev, const T* X, T* C, T* H,
const T forget_bias) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const T* X_offset = X + 4 * dim * n;
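// X packs the four pre-activation gates per sample contiguously as
// [i | f | o | g], each of width dim; the LSTM cell update below is
// c = f * c_prev + i * g and h = o * tanh(c).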
const T i = cuda_sigmoid(X_offset[d]);
const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias);
const T o = cuda_sigmoid(X_offset[2 * dim + d]);
const T g = cuda_tanh(X_offset[3 * dim + d]);
const T c_prev = C_prev[index];
const T c = f * c_prev + i * g;
C[index] = c;
const T tanh_c = cuda_tanh(c);
H[index] = o * tanh_c;
}
}
template <typename T>
__global__ void LSTMUnitGradientKernel(const int nthreads, const int dim,
const T* C_prev, const T* X, const T* C,
const T* H, const T* C_diff,
const T* H_diff, T* C_prev_diff,
T* X_diff, const T forget_bias) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const T* X_offset = X + 4 * dim * n;
T* c_prev_diff = C_prev_diff + index;
T* X_diff_offset = X_diff + 4 * dim * n;
T* i_diff = X_diff_offset + d;
T* f_diff = X_diff_offset + 1 * dim + d;
T* o_diff = X_diff_offset + 2 * dim + d;
T* g_diff = X_diff_offset + 3 * dim + d;
const T i = cuda_sigmoid(X_offset[d]);
const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias);
const T o = cuda_sigmoid(X_offset[2 * dim + d]);
const T g = cuda_tanh(X_offset[3 * dim + d]);
const T c_prev = C_prev[index];
const T c = C[index];
const T tanh_c = cuda_tanh(c);
const T c_term_diff =
C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c);
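// c_term_diff is dL/dc: the gradient reaching the cell state both directly
// (C_diff) and through h = o * tanh(c). The gate gradients below apply the
// derivatives sigmoid'(x) = s * (1 - s) and tanh'(x) = 1 - t * t.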
*c_prev_diff = c_term_diff * f;
*i_diff = c_term_diff * g * i * (1 - i);
*f_diff = c_term_diff * c_prev * f * (1 - f);
*o_diff = H_diff[index] * tanh_c * o * (1 - o);
*g_diff = c_term_diff * i * (1 - g * g);
}
}
template <typename T>
class LstmUnitOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto* x_tensor = ctx.Input<framework::Tensor>("X");
auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
auto* c_tensor = ctx.Output<framework::Tensor>("C");
auto* h_tensor = ctx.Output<framework::Tensor>("H");
auto forget_bias = static_cast<T>(ctx.Attr<float>("forget_bias"));
int b_size = c_tensor->dims()[0];
int D = c_tensor->dims()[1];
const T* X = x_tensor->data<T>();
const T* C_prev = c_prev_tensor->data<T>();
T* C = c_tensor->mutable_data<T>(ctx.GetPlace());
T* H = h_tensor->mutable_data<T>(ctx.GetPlace());
int block = 512;
int n = b_size * D;
int grid = (n + block - 1) / block;
LSTMUnitKernel<T><<<grid, block>>>(n, D, C_prev, X, C, H, forget_bias);
}
};
template <typename T>
class LstmUnitGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto x_tensor = ctx.Input<Tensor>("X");
auto c_prev_tensor = ctx.Input<Tensor>("C_prev");
auto c_tensor = ctx.Input<Tensor>("C");
auto h_tensor = ctx.Input<Tensor>("H");
auto hdiff_tensor = ctx.Input<Tensor>(framework::GradVarName("H"));
auto cdiff_tensor = ctx.Input<Tensor>(framework::GradVarName("C"));
auto xdiff_tensor = ctx.Output<Tensor>(framework::GradVarName("X"));
auto c_prev_diff_tensor =
ctx.Output<Tensor>(framework::GradVarName("C_prev"));
auto* X = x_tensor->data<T>();
auto* C_prev = c_prev_tensor->data<T>();
auto* C = c_tensor->data<T>();
auto* H = h_tensor->data<T>();
auto* H_diff = hdiff_tensor->data<T>();
auto* C_diff = cdiff_tensor->data<T>();
auto* C_prev_diff = c_prev_diff_tensor->mutable_data<T>(ctx.GetPlace());
auto* X_diff = xdiff_tensor->mutable_data<T>(ctx.GetPlace());
int N = c_tensor->dims()[0];
int D = c_tensor->dims()[1];
auto forget_bias = static_cast<T>(ctx.Attr<float>("forget_bias"));
int block = 512;
int n = N * D;
int grid = (n + block - 1) / block;
LSTMUnitGradientKernel<T><<<grid, block>>>(n, D, C_prev, X, C, H, C_diff,
H_diff, C_prev_diff, X_diff,
forget_bias);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(lstm_unit, ops::LstmUnitOpCUDAKernel<float>,
ops::LstmUnitOpCUDAKernel<double>);
REGISTER_OP_GPU_KERNEL(lstm_unit_grad, ops::LstmUnitGradOpCUDAKernel<float>,
ops::LstmUnitGradOpCUDAKernel<double>);
|
52a974896ee2b322d3c9fd10d7bb9e11e6428bff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Vector-pool aggregation-based local feature aggregation for point clouds.
PV-RCNN++: Point-Voxel Feature Set Abstraction With Local Vector Representation for 3D Object Detection
https://arxiv.org/abs/2102.00463
Written by Shaoshuai Shi
All Rights Reserved 2020.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "vector_pool_gpu.h"
#include "cuda_utils.h"
__global__ void query_three_nn_by_stacked_local_idxs_kernel(
const float *support_xyz, const float *new_xyz, const float *new_xyz_grid_centers,
int *new_xyz_grid_idxs, float *new_xyz_grid_dist2,
const int *stack_neighbor_idxs, const int *start_len,
int M, int num_total_grids){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
// new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
// new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
int grid_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M || grid_idx >= num_total_grids) return;
new_xyz += pt_idx * 3;
new_xyz_grid_centers += pt_idx * num_total_grids * 3 + grid_idx * 3;
new_xyz_grid_idxs += pt_idx * num_total_grids * 3 + grid_idx * 3;
new_xyz_grid_dist2 += pt_idx * num_total_grids * 3 + grid_idx * 3;
start_len += pt_idx * 2;
stack_neighbor_idxs += start_len[0];
int neighbor_length = start_len[1];
float center_x = new_xyz_grid_centers[0];
float center_y = new_xyz_grid_centers[1];
float center_z = new_xyz_grid_centers[2];
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = -1, besti2 = -1, besti3 = -1;
for (int k = 0; k < neighbor_length; k++){
int cur_neighbor_idx = stack_neighbor_idxs[k];
float x = support_xyz[cur_neighbor_idx * 3 + 0];
float y = support_xyz[cur_neighbor_idx * 3 + 1];
float z = support_xyz[cur_neighbor_idx * 3 + 2];
float d = (center_x - x) * (center_x - x) + (center_y - y) * (center_y - y) + (center_z - z) * (center_z - z);
if (d < best1) {
best3 = best2; besti3 = besti2;
best2 = best1; besti2 = besti1;
best1 = d; besti1 = cur_neighbor_idx;
}
else if (d < best2) {
best3 = best2; besti3 = besti2;
best2 = d; besti2 = cur_neighbor_idx;
}
else if (d < best3) {
best3 = d; besti3 = cur_neighbor_idx;
}
}
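// Fewer than three neighbors found: pad the empty slots with the nearest one
// so downstream three-NN interpolation always gets three valid indices.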
if (besti2 == -1){
besti2 = besti1; best2 = best1;
}
if (besti3 == -1){
besti3 = besti1; best3 = best1;
}
new_xyz_grid_dist2[0] = best1;
new_xyz_grid_dist2[1] = best2;
new_xyz_grid_dist2[2] = best3;
new_xyz_grid_idxs[0] = besti1;
new_xyz_grid_idxs[1] = besti2;
new_xyz_grid_idxs[2] = besti3;
}
int query_three_nn_by_stacked_local_idxs_kernel_launcher_stack(
const float *support_xyz, const float *new_xyz, const float *new_xyz_grid_centers,
int *new_xyz_grid_idxs, float *new_xyz_grid_dist2,
const int *stack_neighbor_idxs, const int *start_len,
int M, int num_total_grids){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
// new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
// new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
hipError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), num_total_grids); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( query_three_nn_by_stacked_local_idxs_kernel), dim3(blocks), dim3(threads), 0, 0,
support_xyz, new_xyz, new_xyz_grid_centers,
new_xyz_grid_idxs, new_xyz_grid_dist2, stack_neighbor_idxs, start_len,
M, num_total_grids
);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
return 0;
}
__global__ void query_stacked_local_neighbor_idxs_kernel(
const float *support_xyz, const int *xyz_batch_cnt, const float *new_xyz, const int *new_xyz_batch_cnt,
int *stack_neighbor_idxs, int *start_len, int *cumsum, int avg_length_of_neighbor_idxs,
float max_neighbour_distance, int batch_size, int M, int nsample, int neighbor_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
// cumsum: (1), max offset of current data in stack_neighbor_idxs
// max_neighbour_distance: float
// nsample: -1 finds all neighbors; a value > 0 caps how many are found
// neighbor_type: 1: ball, others: cube
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
for (int k = 1; k < batch_size; k++){
if (pt_idx < pt_cnt) break;
pt_cnt += new_xyz_batch_cnt[k];
bs_idx = k;
}
int xyz_batch_start_idx = 0;
for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
support_xyz += xyz_batch_start_idx * 3;
new_xyz += pt_idx * 3;
start_len += pt_idx * 2;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int n = xyz_batch_cnt[bs_idx];
float local_x, local_y, local_z;
float radius2 = max_neighbour_distance * max_neighbour_distance;
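// Candidate neighbors are staged in a fixed-size per-thread buffer, so at
// most 1000 neighbors are kept for each query point.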
int temp_idxs[1000];
int sample_cnt = 0;
for (int k = 0; k < n; ++k) {
local_x = support_xyz[k * 3 + 0] - new_x;
local_y = support_xyz[k * 3 + 1] - new_y;
local_z = support_xyz[k * 3 + 2] - new_z;
if (neighbor_type == 1){
// ball
if (local_x * local_x + local_y * local_y + local_z * local_z > radius2){
continue;
}
}
else{
// cube (axis-aligned box)
if ((fabs(local_x) > max_neighbour_distance) |
(fabs(local_y) > max_neighbour_distance) |
(fabs(local_z) > max_neighbour_distance)){
continue;
}
}
if (sample_cnt < 1000){
temp_idxs[sample_cnt] = k;
}
else{
break;
}
sample_cnt++;
if (nsample > 0 && sample_cnt >= nsample) break;
}
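// Atomic bump allocation: reserve this point's contiguous slice of the shared
// index buffer; writes past the preallocated capacity
// (avg_length_of_neighbor_idxs * M) are clipped below.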
start_len[0] = atomicAdd(cumsum, sample_cnt);
start_len[1] = sample_cnt;
int max_thresh = avg_length_of_neighbor_idxs * M;
if (start_len[0] >= max_thresh) return;
stack_neighbor_idxs += start_len[0];
if (start_len[0] + sample_cnt >= max_thresh) sample_cnt = max_thresh - start_len[0];
for (int k = 0; k < sample_cnt; k++){
stack_neighbor_idxs[k] = temp_idxs[k] + xyz_batch_start_idx;
}
}
int query_stacked_local_neighbor_idxs_kernel_launcher_stack(
const float *support_xyz, const int *xyz_batch_cnt, const float *new_xyz, const int *new_xyz_batch_cnt,
int *stack_neighbor_idxs, int *start_len, int *cumsum, int avg_length_of_neighbor_idxs,
float max_neighbour_distance, int batch_size, int M, int nsample, int neighbor_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
// cumsum: (1), max offset of current data in stack_neighbor_idxs
// max_neighbour_distance: float
// nsample: -1 finds all neighbors; a value > 0 caps how many are found
// neighbor_type: 1: ball, others: cube
hipError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( query_stacked_local_neighbor_idxs_kernel), dim3(blocks), dim3(threads), 0, 0,
support_xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt,
stack_neighbor_idxs, start_len, cumsum, avg_length_of_neighbor_idxs,
max_neighbour_distance, batch_size, M, nsample, neighbor_type
);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
return 0;
}
__global__ void vector_pool_kernel_stack(
const float *support_xyz, const float *support_features, const int *xyz_batch_cnt,
const float *new_xyz, float *new_features, float *new_local_xyz, const int *new_xyz_batch_cnt,
int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance,
int batch_size, int M, int num_c_in, int num_c_out,
int num_c_each_grid, int num_total_grids, int *point_cnt_of_grid, int *grouped_idxs,
int use_xyz, float grid_size_x, float grid_size_y,
float grid_size_z, int *cum_sum, int num_max_sum_points, int nsample, int neighbor_type, int pooling_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// support_features: (N1 + N2 ..., C)
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_features: (M1 + M2 ..., C), C = num_total_grids * num_c_each_grid
// new_local_xyz: (M1 + M2 ..., 3 * num_total_grids)
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// num_grid_x, num_grid_y, num_grid_z: number of grids in each local area centered at new_xyz
// point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
// grouped_idxs: (num_max_sum_points, 3)[idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// use_xyz: whether to calculate new_local_xyz
// neighbor_type: 1: ball, others: cube
// pooling_type: 0: avg_pool, 1: random choice
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
for (int k = 1; k < batch_size; k++){
if (pt_idx < pt_cnt) break;
pt_cnt += new_xyz_batch_cnt[k];
bs_idx = k;
}
int xyz_batch_start_idx = 0;
for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
support_xyz += xyz_batch_start_idx * 3;
support_features += xyz_batch_start_idx * num_c_in;
new_xyz += pt_idx * 3;
new_features += pt_idx * num_c_out;
point_cnt_of_grid += pt_idx * num_total_grids;
new_local_xyz += pt_idx * 3 * num_total_grids;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int n = xyz_batch_cnt[bs_idx], grid_idx_x, grid_idx_y, grid_idx_z, grid_idx;
float local_x, local_y, local_z;
float radius2 = max_neighbour_distance * max_neighbour_distance;
int sample_cnt = 0;
for (int k = 0; k < n; ++k) {
local_x = support_xyz[k * 3 + 0] - new_x;
local_y = support_xyz[k * 3 + 1] - new_y;
local_z = support_xyz[k * 3 + 2] - new_z;
if (neighbor_type == 1){
// ball
if (local_x * local_x + local_y * local_y + local_z * local_z > radius2){
continue;
}
}
else{
// cube (axis-aligned box)
if ((fabs(local_x) > max_neighbour_distance) |
(fabs(local_y) > max_neighbour_distance) |
(fabs(local_z) > max_neighbour_distance)){
continue;
}
}
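// Map the local offset (in [-max_neighbour_distance, +max_neighbour_distance]
// per axis) to a flat sub-voxel cell index, clamped into [0, num_total_grids).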
grid_idx_x = floorf((local_x + max_neighbour_distance) / grid_size_x);
grid_idx_y = floorf((local_y + max_neighbour_distance) / grid_size_y);
grid_idx_z = floorf((local_z + max_neighbour_distance) / grid_size_z);
grid_idx = grid_idx_x * num_grid_y * num_grid_z + grid_idx_y * num_grid_z + grid_idx_z;
grid_idx = min(max(grid_idx, 0), num_total_grids - 1);
if (pooling_type == 0){
// avg pooling
point_cnt_of_grid[grid_idx] ++;
for (int i = 0; i < num_c_in; i++){
new_features[grid_idx * num_c_each_grid + i % num_c_each_grid] += support_features[k * num_c_in + i];
}
if (use_xyz){
new_local_xyz[grid_idx * 3 + 0] += local_x;
new_local_xyz[grid_idx * 3 + 1] += local_y;
new_local_xyz[grid_idx * 3 + 2] += local_z;
}
int cnt = atomicAdd(cum_sum, 1);
if (cnt >= num_max_sum_points) continue; // over capacity: skip recording but keep counting so the true maximum can be reported
grouped_idxs[cnt * 3 + 0] = xyz_batch_start_idx + k;
grouped_idxs[cnt * 3 + 1] = pt_idx;
grouped_idxs[cnt * 3 + 2] = grid_idx;
sample_cnt++;
if(nsample > 0 && sample_cnt >= nsample) break;
}
else if (pooling_type == 1){
// "random" choice within each sub-voxel: the first point that lands in the cell wins
// printf("new_xyz=(%.2f, %.2f, %.2f, ), find neighbor k=%d: support_xyz=(%.2f, %.2f, %.2f), local_xyz=(%.2f, %.2f, %.2f), neighbor=%.2f, grid_idx=%d, point_cnt_of_grid_idx=%d\n",
// new_x, new_y, new_z, k, support_xyz[k * 3 + 0], support_xyz[k * 3 + 1], support_xyz[k * 3 + 2], local_x, local_y, local_z, max_neighbour_distance, grid_idx, point_cnt_of_grid[grid_idx]);
if (point_cnt_of_grid[grid_idx] == 0){
point_cnt_of_grid[grid_idx] ++;
for (int i = 0; i < num_c_in; i++){
new_features[grid_idx * num_c_each_grid + i % num_c_each_grid] = support_features[k * num_c_in + i];
}
if (use_xyz){
new_local_xyz[grid_idx * 3 + 0] = local_x;
new_local_xyz[grid_idx * 3 + 1] = local_y;
new_local_xyz[grid_idx * 3 + 2] = local_z;
}
int cnt = atomicAdd(cum_sum, 1);
if (cnt >= num_max_sum_points) continue; // over capacity: skip recording but keep counting so the true maximum can be reported
grouped_idxs[cnt * 3 + 0] = xyz_batch_start_idx + k;
grouped_idxs[cnt * 3 + 1] = pt_idx;
grouped_idxs[cnt * 3 + 2] = grid_idx;
sample_cnt++;
if(nsample > 0 && sample_cnt >= nsample || sample_cnt >= num_total_grids) break;
}
}
}
}
int vector_pool_kernel_launcher_stack(
const float *support_xyz, const float *support_features, const int *xyz_batch_cnt,
const float *new_xyz, float *new_features, float *new_local_xyz, const int *new_xyz_batch_cnt,
int *point_cnt_of_grid, int *grouped_idxs,
int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance,
int batch_size, int N, int M, int num_c_in, int num_c_out, int num_total_grids,
int use_xyz, int num_max_sum_points, int nsample, int neighbor_type, int pooling_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// support_features: (N1 + N2 ..., C)
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_features: (M1 + M2 ..., C)
// new_local_xyz: (M1 + M2 ..., 3)
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// num_grid_x, num_grid_y, num_grid_z: number of grids in each local area centered at new_xyz
// use_xyz: whether to calculate new_local_xyz
// grouped_idxs: (num_max_sum_points, 3)[idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// neighbor_type: 1: ball, others: cube
// pooling_type: 0: avg_pool, 1: random choice
hipError_t err;
int num_c_each_grid = num_c_out / num_total_grids;
float grid_size_x = max_neighbour_distance * 2 / num_grid_x;
float grid_size_y = max_neighbour_distance * 2 / num_grid_y;
float grid_size_z = max_neighbour_distance * 2 / num_grid_z;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
int cum_sum = 0;
int *p_cum_sum;
hipMalloc((void**)&p_cum_sum, sizeof(int));
hipMemcpy(p_cum_sum, &cum_sum, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vector_pool_kernel_stack), dim3(blocks), dim3(threads), 0, 0,
support_xyz, support_features, xyz_batch_cnt,
new_xyz, new_features, new_local_xyz, new_xyz_batch_cnt,
num_grid_x, num_grid_y, num_grid_z, max_neighbour_distance,
batch_size, M, num_c_in, num_c_out,
num_c_each_grid, num_total_grids, point_cnt_of_grid, grouped_idxs,
use_xyz, grid_size_x, grid_size_y, grid_size_z, p_cum_sum, num_max_sum_points,
nsample, neighbor_type, pooling_type
);
hipMemcpy(&cum_sum, p_cum_sum, sizeof(int), hipMemcpyDeviceToHost);
hipFree(p_cum_sum); // release the device-side counter (previously leaked)
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
return cum_sum;
}
__global__ void vector_pool_grad_kernel_stack(const float *grad_new_features,
const int *point_cnt_of_grid, const int *grouped_idxs,
float *grad_support_features, int N, int M, int num_c_out, int num_c_in,
int num_c_each_grid, int num_total_grids, int num_max_sum_points){
// grad_new_features: (M1 + M2 ..., C_out)
// point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
// grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// grad_support_features: (N1 + N2 ..., C_in)
int channel_idx = blockIdx.y;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_max_sum_points || channel_idx >= num_c_in) return;
int idx_of_support_xyz = grouped_idxs[index * 3 + 0];
int idx_of_new_xyz = grouped_idxs[index * 3 + 1];
int idx_of_grid_idx = grouped_idxs[index * 3 + 2];
int num_total_pts = point_cnt_of_grid[idx_of_new_xyz * num_total_grids + idx_of_grid_idx];
grad_support_features += idx_of_support_xyz * num_c_in + channel_idx;
grad_new_features += idx_of_new_xyz * num_c_out + idx_of_grid_idx * num_c_each_grid;
int channel_idx_of_cin = channel_idx % num_c_each_grid;
float cur_grad = 1 / fmaxf(float(num_total_pts), 1.0);
atomicAdd(grad_support_features, grad_new_features[channel_idx_of_cin] * cur_grad);
}
void vector_pool_grad_kernel_launcher_stack(
const float *grad_new_features, const int *point_cnt_of_grid, const int *grouped_idxs,
float *grad_support_features, int N, int M, int num_c_out, int num_c_in, int num_total_grids,
int num_max_sum_points){
// grad_new_features: (M1 + M2 ..., C_out)
// point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
// grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// grad_support_features: (N1 + N2 ..., C_in)
int num_c_each_grid = num_c_out / num_total_grids;
hipError_t err;
dim3 blocks(DIVUP(num_max_sum_points, THREADS_PER_BLOCK), num_c_in); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( vector_pool_grad_kernel_stack), dim3(blocks), dim3(threads), 0, 0,
grad_new_features, point_cnt_of_grid, grouped_idxs, grad_support_features,
N, M, num_c_out, num_c_in, num_c_each_grid, num_total_grids, num_max_sum_points
);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
} | 52a974896ee2b322d3c9fd10d7bb9e11e6428bff.cu | /*
Vector-pool aggregation-based local feature aggregation for point clouds.
PV-RCNN++: Point-Voxel Feature Set Abstraction With Local Vector Representation for 3D Object Detection
https://arxiv.org/abs/2102.00463
Written by Shaoshuai Shi
All Rights Reserved 2020.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "vector_pool_gpu.h"
#include "cuda_utils.h"
__global__ void query_three_nn_by_stacked_local_idxs_kernel(
const float *support_xyz, const float *new_xyz, const float *new_xyz_grid_centers,
int *new_xyz_grid_idxs, float *new_xyz_grid_dist2,
const int *stack_neighbor_idxs, const int *start_len,
int M, int num_total_grids){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
// new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
// new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
int grid_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M || grid_idx >= num_total_grids) return;
new_xyz += pt_idx * 3;
new_xyz_grid_centers += pt_idx * num_total_grids * 3 + grid_idx * 3;
new_xyz_grid_idxs += pt_idx * num_total_grids * 3 + grid_idx * 3;
new_xyz_grid_dist2 += pt_idx * num_total_grids * 3 + grid_idx * 3;
start_len += pt_idx * 2;
stack_neighbor_idxs += start_len[0];
int neighbor_length = start_len[1];
float center_x = new_xyz_grid_centers[0];
float center_y = new_xyz_grid_centers[1];
float center_z = new_xyz_grid_centers[2];
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = -1, besti2 = -1, besti3 = -1;
for (int k = 0; k < neighbor_length; k++){
int cur_neighbor_idx = stack_neighbor_idxs[k];
float x = support_xyz[cur_neighbor_idx * 3 + 0];
float y = support_xyz[cur_neighbor_idx * 3 + 1];
float z = support_xyz[cur_neighbor_idx * 3 + 2];
float d = (center_x - x) * (center_x - x) + (center_y - y) * (center_y - y) + (center_z - z) * (center_z - z);
if (d < best1) {
best3 = best2; besti3 = besti2;
best2 = best1; besti2 = besti1;
best1 = d; besti1 = cur_neighbor_idx;
}
else if (d < best2) {
best3 = best2; besti3 = besti2;
best2 = d; besti2 = cur_neighbor_idx;
}
else if (d < best3) {
best3 = d; besti3 = cur_neighbor_idx;
}
}
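// Fewer than three neighbors found: pad the empty slots with the nearest one
// so downstream three-NN interpolation always gets three valid indices.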
if (besti2 == -1){
besti2 = besti1; best2 = best1;
}
if (besti3 == -1){
besti3 = besti1; best3 = best1;
}
new_xyz_grid_dist2[0] = best1;
new_xyz_grid_dist2[1] = best2;
new_xyz_grid_dist2[2] = best3;
new_xyz_grid_idxs[0] = besti1;
new_xyz_grid_idxs[1] = besti2;
new_xyz_grid_idxs[2] = besti3;
}
int query_three_nn_by_stacked_local_idxs_kernel_launcher_stack(
const float *support_xyz, const float *new_xyz, const float *new_xyz_grid_centers,
int *new_xyz_grid_idxs, float *new_xyz_grid_dist2,
const int *stack_neighbor_idxs, const int *start_len,
int M, int num_total_grids){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
// new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
// new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
cudaError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), num_total_grids); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
query_three_nn_by_stacked_local_idxs_kernel<<<blocks, threads>>>(
support_xyz, new_xyz, new_xyz_grid_centers,
new_xyz_grid_idxs, new_xyz_grid_dist2, stack_neighbor_idxs, start_len,
M, num_total_grids
);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
return 0;
}
__global__ void query_stacked_local_neighbor_idxs_kernel(
const float *support_xyz, const int *xyz_batch_cnt, const float *new_xyz, const int *new_xyz_batch_cnt,
int *stack_neighbor_idxs, int *start_len, int *cumsum, int avg_length_of_neighbor_idxs,
float max_neighbour_distance, int batch_size, int M, int nsample, int neighbor_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
// cumsum: (1), max offset of current data in stack_neighbor_idxs
// max_neighbour_distance: float
// nsample: -1 finds all neighbors; a value > 0 caps how many are found
// neighbor_type: 1: ball, others: cube
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
for (int k = 1; k < batch_size; k++){
if (pt_idx < pt_cnt) break;
pt_cnt += new_xyz_batch_cnt[k];
bs_idx = k;
}
int xyz_batch_start_idx = 0;
for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
support_xyz += xyz_batch_start_idx * 3;
new_xyz += pt_idx * 3;
start_len += pt_idx * 2;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int n = xyz_batch_cnt[bs_idx];
float local_x, local_y, local_z;
float radius2 = max_neighbour_distance * max_neighbour_distance;
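// Candidate neighbors are staged in a fixed-size per-thread buffer, so at
// most 1000 neighbors are kept for each query point.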
int temp_idxs[1000];
int sample_cnt = 0;
for (int k = 0; k < n; ++k) {
local_x = support_xyz[k * 3 + 0] - new_x;
local_y = support_xyz[k * 3 + 1] - new_y;
local_z = support_xyz[k * 3 + 2] - new_z;
if (neighbor_type == 1){
// ball
if (local_x * local_x + local_y * local_y + local_z * local_z > radius2){
continue;
}
}
else{
// cube (axis-aligned box)
if ((fabs(local_x) > max_neighbour_distance) |
(fabs(local_y) > max_neighbour_distance) |
(fabs(local_z) > max_neighbour_distance)){
continue;
}
}
if (sample_cnt < 1000){
temp_idxs[sample_cnt] = k;
}
else{
break;
}
sample_cnt++;
if (nsample > 0 && sample_cnt >= nsample) break;
}
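// Atomic bump allocation: reserve this point's contiguous slice of the shared
// index buffer; writes past the preallocated capacity
// (avg_length_of_neighbor_idxs * M) are clipped below.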
start_len[0] = atomicAdd(cumsum, sample_cnt);
start_len[1] = sample_cnt;
int max_thresh = avg_length_of_neighbor_idxs * M;
if (start_len[0] >= max_thresh) return;
stack_neighbor_idxs += start_len[0];
if (start_len[0] + sample_cnt >= max_thresh) sample_cnt = max_thresh - start_len[0];
for (int k = 0; k < sample_cnt; k++){
stack_neighbor_idxs[k] = temp_idxs[k] + xyz_batch_start_idx;
}
}
int query_stacked_local_neighbor_idxs_kernel_launcher_stack(
const float *support_xyz, const int *xyz_batch_cnt, const float *new_xyz, const int *new_xyz_batch_cnt,
int *stack_neighbor_idxs, int *start_len, int *cumsum, int avg_length_of_neighbor_idxs,
float max_neighbour_distance, int batch_size, int M, int nsample, int neighbor_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
// cumsum: (1), max offset of current data in stack_neighbor_idxs
// max_neighbour_distance: float
// nsample: -1 finds all neighbors; a value > 0 caps how many are found
// neighbor_type: 1: ball, others: cube
cudaError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
query_stacked_local_neighbor_idxs_kernel<<<blocks, threads>>>(
support_xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt,
stack_neighbor_idxs, start_len, cumsum, avg_length_of_neighbor_idxs,
max_neighbour_distance, batch_size, M, nsample, neighbor_type
);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
return 0;
}
__global__ void vector_pool_kernel_stack(
const float *support_xyz, const float *support_features, const int *xyz_batch_cnt,
const float *new_xyz, float *new_features, float *new_local_xyz, const int *new_xyz_batch_cnt,
int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance,
int batch_size, int M, int num_c_in, int num_c_out,
int num_c_each_grid, int num_total_grids, int *point_cnt_of_grid, int *grouped_idxs,
int use_xyz, float grid_size_x, float grid_size_y,
float grid_size_z, int *cum_sum, int num_max_sum_points, int nsample, int neighbor_type, int pooling_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// support_features: (N1 + N2 ..., C)
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_features: (M1 + M2 ..., C), C = num_total_grids * num_c_each_grid
// new_local_xyz: (M1 + M2 ..., 3 * num_total_grids)
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// num_grid_x, num_grid_y, num_grid_z: number of grids in each local area centered at new_xyz
// point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
// grouped_idxs: (num_max_sum_points, 3)[idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// use_xyz: whether to calculate new_local_xyz
// neighbor_type: 1: ball, others: cube
// pooling_type: 0: avg_pool, 1: random choice
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
for (int k = 1; k < batch_size; k++){
if (pt_idx < pt_cnt) break;
pt_cnt += new_xyz_batch_cnt[k];
bs_idx = k;
}
int xyz_batch_start_idx = 0;
for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
support_xyz += xyz_batch_start_idx * 3;
support_features += xyz_batch_start_idx * num_c_in;
new_xyz += pt_idx * 3;
new_features += pt_idx * num_c_out;
point_cnt_of_grid += pt_idx * num_total_grids;
new_local_xyz += pt_idx * 3 * num_total_grids;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int n = xyz_batch_cnt[bs_idx], grid_idx_x, grid_idx_y, grid_idx_z, grid_idx;
float local_x, local_y, local_z;
float radius2 = max_neighbour_distance * max_neighbour_distance;
int sample_cnt = 0;
for (int k = 0; k < n; ++k) {
local_x = support_xyz[k * 3 + 0] - new_x;
local_y = support_xyz[k * 3 + 1] - new_y;
local_z = support_xyz[k * 3 + 2] - new_z;
if (neighbor_type == 1){
// ball
if (local_x * local_x + local_y * local_y + local_z * local_z > radius2){
continue;
}
}
else{
// cube (axis-aligned box)
if ((fabs(local_x) > max_neighbour_distance) |
(fabs(local_y) > max_neighbour_distance) |
(fabs(local_z) > max_neighbour_distance)){
continue;
}
}
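// Map the local offset (in [-max_neighbour_distance, +max_neighbour_distance]
// per axis) to a flat sub-voxel cell index, clamped into [0, num_total_grids).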
grid_idx_x = floorf((local_x + max_neighbour_distance) / grid_size_x);
grid_idx_y = floorf((local_y + max_neighbour_distance) / grid_size_y);
grid_idx_z = floorf((local_z + max_neighbour_distance) / grid_size_z);
grid_idx = grid_idx_x * num_grid_y * num_grid_z + grid_idx_y * num_grid_z + grid_idx_z;
grid_idx = min(max(grid_idx, 0), num_total_grids - 1);
if (pooling_type == 0){
// avg pooling
point_cnt_of_grid[grid_idx] ++;
for (int i = 0; i < num_c_in; i++){
new_features[grid_idx * num_c_each_grid + i % num_c_each_grid] += support_features[k * num_c_in + i];
}
if (use_xyz){
new_local_xyz[grid_idx * 3 + 0] += local_x;
new_local_xyz[grid_idx * 3 + 1] += local_y;
new_local_xyz[grid_idx * 3 + 2] += local_z;
}
int cnt = atomicAdd(cum_sum, 1);
if (cnt >= num_max_sum_points) continue; // over capacity: skip recording but keep counting so the true maximum can be reported
grouped_idxs[cnt * 3 + 0] = xyz_batch_start_idx + k;
grouped_idxs[cnt * 3 + 1] = pt_idx;
grouped_idxs[cnt * 3 + 2] = grid_idx;
sample_cnt++;
if(nsample > 0 && sample_cnt >= nsample) break;
}
else if (pooling_type == 1){
// "random" choice within each sub-voxel: the first point that lands in the cell wins
// printf("new_xyz=(%.2f, %.2f, %.2f, ), find neighbor k=%d: support_xyz=(%.2f, %.2f, %.2f), local_xyz=(%.2f, %.2f, %.2f), neighbor=%.2f, grid_idx=%d, point_cnt_of_grid_idx=%d\n",
// new_x, new_y, new_z, k, support_xyz[k * 3 + 0], support_xyz[k * 3 + 1], support_xyz[k * 3 + 2], local_x, local_y, local_z, max_neighbour_distance, grid_idx, point_cnt_of_grid[grid_idx]);
if (point_cnt_of_grid[grid_idx] == 0){
point_cnt_of_grid[grid_idx] ++;
for (int i = 0; i < num_c_in; i++){
new_features[grid_idx * num_c_each_grid + i % num_c_each_grid] = support_features[k * num_c_in + i];
}
if (use_xyz){
new_local_xyz[grid_idx * 3 + 0] = local_x;
new_local_xyz[grid_idx * 3 + 1] = local_y;
new_local_xyz[grid_idx * 3 + 2] = local_z;
}
int cnt = atomicAdd(cum_sum, 1);
if (cnt >= num_max_sum_points) continue; // over capacity: skip recording but keep counting so the true maximum can be reported
grouped_idxs[cnt * 3 + 0] = xyz_batch_start_idx + k;
grouped_idxs[cnt * 3 + 1] = pt_idx;
grouped_idxs[cnt * 3 + 2] = grid_idx;
sample_cnt++;
if(nsample > 0 && sample_cnt >= nsample || sample_cnt >= num_total_grids) break;
}
}
}
}
int vector_pool_kernel_launcher_stack(
const float *support_xyz, const float *support_features, const int *xyz_batch_cnt,
const float *new_xyz, float *new_features, float *new_local_xyz, const int *new_xyz_batch_cnt,
int *point_cnt_of_grid, int *grouped_idxs,
int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance,
int batch_size, int N, int M, int num_c_in, int num_c_out, int num_total_grids,
int use_xyz, int num_max_sum_points, int nsample, int neighbor_type, int pooling_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// support_features: (N1 + N2 ..., C)
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_features: (M1 + M2 ..., C)
// new_local_xyz: (M1 + M2 ..., 3)
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// num_grid_x, num_grid_y, num_grid_z: number of grids in each local area centered at new_xyz
// use_xyz: whether to calculate new_local_xyz
// grouped_idxs: (num_max_sum_points, 3)[idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// neighbor_type: 1: ball, others: cube
// pooling_type: 0: avg_pool, 1: random choice
cudaError_t err;
int num_c_each_grid = num_c_out / num_total_grids;
float grid_size_x = max_neighbour_distance * 2 / num_grid_x;
float grid_size_y = max_neighbour_distance * 2 / num_grid_y;
float grid_size_z = max_neighbour_distance * 2 / num_grid_z;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
int cum_sum = 0;
int *p_cum_sum;
cudaMalloc((void**)&p_cum_sum, sizeof(int));
cudaMemcpy(p_cum_sum, &cum_sum, sizeof(int), cudaMemcpyHostToDevice);
vector_pool_kernel_stack<<<blocks, threads>>>(
support_xyz, support_features, xyz_batch_cnt,
new_xyz, new_features, new_local_xyz, new_xyz_batch_cnt,
num_grid_x, num_grid_y, num_grid_z, max_neighbour_distance,
batch_size, M, num_c_in, num_c_out,
num_c_each_grid, num_total_grids, point_cnt_of_grid, grouped_idxs,
use_xyz, grid_size_x, grid_size_y, grid_size_z, p_cum_sum, num_max_sum_points,
nsample, neighbor_type, pooling_type
);
cudaMemcpy(&cum_sum, p_cum_sum, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(p_cum_sum); // release the device-side counter (previously leaked)
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
return cum_sum;
}
__global__ void vector_pool_grad_kernel_stack(const float *grad_new_features,
const int *point_cnt_of_grid, const int *grouped_idxs,
float *grad_support_features, int N, int M, int num_c_out, int num_c_in,
int num_c_each_grid, int num_total_grids, int num_max_sum_points){
// grad_new_features: (M1 + M2 ..., C_out)
// point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
// grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// grad_support_features: (N1 + N2 ..., C_in)
int channel_idx = blockIdx.y;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_max_sum_points || channel_idx >= num_c_in) return;
int idx_of_support_xyz = grouped_idxs[index * 3 + 0];
int idx_of_new_xyz = grouped_idxs[index * 3 + 1];
int idx_of_grid_idx = grouped_idxs[index * 3 + 2];
int num_total_pts = point_cnt_of_grid[idx_of_new_xyz * num_total_grids + idx_of_grid_idx];
grad_support_features += idx_of_support_xyz * num_c_in + channel_idx;
grad_new_features += idx_of_new_xyz * num_c_out + idx_of_grid_idx * num_c_each_grid;
int channel_idx_of_cin = channel_idx % num_c_each_grid;
float cur_grad = 1 / fmaxf(float(num_total_pts), 1.0);
atomicAdd(grad_support_features, grad_new_features[channel_idx_of_cin] * cur_grad);
}
void vector_pool_grad_kernel_launcher_stack(
const float *grad_new_features, const int *point_cnt_of_grid, const int *grouped_idxs,
float *grad_support_features, int N, int M, int num_c_out, int num_c_in, int num_total_grids,
int num_max_sum_points){
// grad_new_features: (M1 + M2 ..., C_out)
// point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
// grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// grad_support_features: (N1 + N2 ..., C_in)
int num_c_each_grid = num_c_out / num_total_grids;
cudaError_t err;
dim3 blocks(DIVUP(num_max_sum_points, THREADS_PER_BLOCK), num_c_in); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
vector_pool_grad_kernel_stack<<<blocks, threads>>>(
grad_new_features, point_cnt_of_grid, grouped_idxs, grad_support_features,
N, M, num_c_out, num_c_in, num_c_each_grid, num_total_grids, num_max_sum_points
);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
} |
cf8643d7db80e62680a0b4538d61e495b3e3967f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
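// Backtracking helper, presumably for a dancing-links (DL) style exact-cover
// search given the dl_matrix naming: rows claimed at the current search depth
// are released (reset to 0) when the search unwinds.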
__device__ void recover_results(short *results, const int search_depth, const int total_dl_matrix_row_num) {
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
if (results[i] == search_depth) {
results[i] = 0;
}
}
}
__global__ void recover_results(int *results, const int search_depth, const int total_dl_matrix_row_num) {
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
if (results[i] == search_depth) {
results[i] = 0;
}
}
} | cf8643d7db80e62680a0b4538d61e495b3e3967f.cu | #include "includes.h"
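// Backtracking helper, presumably for a dancing-links (DL) style exact-cover
// search given the dl_matrix naming: rows claimed at the current search depth
// are released (reset to 0) when the search unwinds.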
__device__ void recover_results(short *results, const int search_depth, const int total_dl_matrix_row_num) {
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
if (results[i] == search_depth) {
results[i] = 0;
}
}
}
__global__ void recover_results(int *results, const int search_depth, const int total_dl_matrix_row_num) {
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
if (results[i] == search_depth) {
results[i] = 0;
}
}
} |
749cf42cd06e272ab86e3c23066dcc4871692c98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
}
int main(void)
{
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(int);
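// Round trip for a single int: allocate device copies, upload the operands,
// launch one thread to add them, then download the sum.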
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
a = 2;
b = 7;
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("sum is %d\n", c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 749cf42cd06e272ab86e3c23066dcc4871692c98.cu | #include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
}
int main(void)
{
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(int);
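// Round trip for a single int: allocate device copies, upload the operands,
// launch one thread to add them, then download the sum.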
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
a = 2;
b = 7;
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
add<<<1,1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("sum is %d\n", c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
71b795f4c45c9199098d473f6797a6b052f1ce60.hip | // !!! This is a file automatically generated by hipify!!!
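// Smooth trigger turn-on: for distance <= 0.5 the value follows
// linConst + (1 - linConst) * sin(pi * distance), ramping up to exactly 1 at
// distance = 0.5 (sin(pi/2) = 1); beyond that it saturates at 1.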
#include "TrigThresholdThrustFunctor.hh"
__device__ fptype threshCalc (fptype distance, fptype linConst) {
fptype ret = (distance > fptype(0.5) ? fptype(1) : (linConst + (1 - linConst) * SIN(distance * fptype(3.14159265))));
return ret;
}
__device__ fptype device_TrigThresholdUpper (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype thresh = p[indices[1]];
fptype trigConst = p[indices[2]];
fptype linConst = p[indices[3]];
trigConst *= (thresh - x);
return threshCalc(trigConst, linConst);
}
__device__ fptype device_TrigThresholdLower (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype thresh = p[indices[1]];
fptype trigConst = p[indices[2]];
fptype linConst = p[indices[3]];
trigConst *= (x - thresh);
return threshCalc(trigConst, linConst);
}
__device__ fptype device_VerySpecialEpisodeTrigThresholdUpper (fptype* evt, fptype* p, unsigned int* indices) {
// Annoying special case for use with Mikhail's efficiency function across the Dalitz plot
fptype x = evt[indices[2 + indices[0] + 0]];
fptype y = evt[indices[2 + indices[0] + 1]];
fptype thresh = p[indices[1]];
fptype trigConst = p[indices[2]];
fptype linConst = p[indices[3]];
fptype z = p[indices[4]] - x - y;
trigConst *= (thresh - z);
return threshCalc(trigConst, linConst);
}
__device__ fptype device_VerySpecialEpisodeTrigThresholdLower (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0] + 0]];
fptype y = evt[indices[2 + indices[0] + 1]];
fptype thresh = p[indices[1]];
fptype trigConst = p[indices[2]];
fptype linConst = p[indices[3]];
fptype z = p[indices[4]] - x - y;
trigConst *= (z - thresh);
fptype ret = threshCalc(trigConst, linConst);
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//printf("TrigThreshold: (%f - %f = %f) -> %f %f\n", z, thresh, trigConst, linConst, ret);
return ret;
}
__device__ device_function_ptr ptr_to_TrigThresholdUpper = device_TrigThresholdUpper;
__device__ device_function_ptr ptr_to_TrigThresholdLower = device_TrigThresholdLower;
__device__ device_function_ptr ptr_to_VerySpecialEpisodeTrigThresholdUpper = device_VerySpecialEpisodeTrigThresholdUpper;
__device__ device_function_ptr ptr_to_VerySpecialEpisodeTrigThresholdLower = device_VerySpecialEpisodeTrigThresholdLower;
__host__ TrigThresholdThrustFunctor::TrigThresholdThrustFunctor (std::string n, Variable* _x, Variable* thresh, Variable* trigConst, Variable* linConst, bool upper)
: ThrustPdfFunctor(_x, n)
{
std::vector<unsigned int> pindices;
pindices.push_back(registerParameter(thresh));
pindices.push_back(registerParameter(trigConst));
pindices.push_back(registerParameter(linConst));
if (upper) hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_TrigThresholdUpper, sizeof(void*));
else hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_TrigThresholdLower, sizeof(void*));
initialise(pindices);
}
__host__ TrigThresholdThrustFunctor::TrigThresholdThrustFunctor (std::string n, Variable* _x, Variable* _y, Variable* thresh, Variable* trigConst, Variable* linConst, Variable* massConstant, bool upper)
: ThrustPdfFunctor(0, n)
{
registerObservable(_x);
registerObservable(_y);
std::vector<unsigned int> pindices;
pindices.push_back(registerParameter(thresh));
pindices.push_back(registerParameter(trigConst));
pindices.push_back(registerParameter(linConst));
pindices.push_back(registerParameter(massConstant));
if (upper) hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_VerySpecialEpisodeTrigThresholdUpper, sizeof(void*));
else hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_VerySpecialEpisodeTrigThresholdLower, sizeof(void*));
initialise(pindices);
}
| 71b795f4c45c9199098d473f6797a6b052f1ce60.cu | #include "TrigThresholdThrustFunctor.hh"
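// Smooth trigger turn-on: for distance <= 0.5 the value follows
// linConst + (1 - linConst) * sin(pi * distance), ramping up to exactly 1 at
// distance = 0.5 (sin(pi/2) = 1); beyond that it saturates at 1.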
__device__ fptype threshCalc (fptype distance, fptype linConst) {
fptype ret = (distance > fptype(0.5) ? fptype(1) : (linConst + (1 - linConst) * SIN(distance * fptype(3.14159265))));
return ret;
}
__device__ fptype device_TrigThresholdUpper (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype thresh = p[indices[1]];
fptype trigConst = p[indices[2]];
fptype linConst = p[indices[3]];
trigConst *= (thresh - x);
return threshCalc(trigConst, linConst);
}
__device__ fptype device_TrigThresholdLower (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype thresh = p[indices[1]];
fptype trigConst = p[indices[2]];
fptype linConst = p[indices[3]];
trigConst *= (x - thresh);
return threshCalc(trigConst, linConst);
}
__device__ fptype device_VerySpecialEpisodeTrigThresholdUpper (fptype* evt, fptype* p, unsigned int* indices) {
// Annoying special case for use with Mikhail's efficiency function across the Dalitz plot
fptype x = evt[indices[2 + indices[0] + 0]];
fptype y = evt[indices[2 + indices[0] + 1]];
fptype thresh = p[indices[1]];
fptype trigConst = p[indices[2]];
fptype linConst = p[indices[3]];
fptype z = p[indices[4]] - x - y;
trigConst *= (thresh - z);
return threshCalc(trigConst, linConst);
}
__device__ fptype device_VerySpecialEpisodeTrigThresholdLower (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0] + 0]];
fptype y = evt[indices[2 + indices[0] + 1]];
fptype thresh = p[indices[1]];
fptype trigConst = p[indices[2]];
fptype linConst = p[indices[3]];
fptype z = p[indices[4]] - x - y;
trigConst *= (z - thresh);
fptype ret = threshCalc(trigConst, linConst);
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//printf("TrigThreshold: (%f - %f = %f) -> %f %f\n", z, thresh, trigConst, linConst, ret);
return ret;
}
__device__ device_function_ptr ptr_to_TrigThresholdUpper = device_TrigThresholdUpper;
__device__ device_function_ptr ptr_to_TrigThresholdLower = device_TrigThresholdLower;
__device__ device_function_ptr ptr_to_VerySpecialEpisodeTrigThresholdUpper = device_VerySpecialEpisodeTrigThresholdUpper;
__device__ device_function_ptr ptr_to_VerySpecialEpisodeTrigThresholdLower = device_VerySpecialEpisodeTrigThresholdLower;
__host__ TrigThresholdThrustFunctor::TrigThresholdThrustFunctor (std::string n, Variable* _x, Variable* thresh, Variable* trigConst, Variable* linConst, bool upper)
: ThrustPdfFunctor(_x, n)
{
std::vector<unsigned int> pindices;
pindices.push_back(registerParameter(thresh));
pindices.push_back(registerParameter(trigConst));
pindices.push_back(registerParameter(linConst));
if (upper) cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_TrigThresholdUpper, sizeof(void*));
else cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_TrigThresholdLower, sizeof(void*));
initialise(pindices);
}
__host__ TrigThresholdThrustFunctor::TrigThresholdThrustFunctor (std::string n, Variable* _x, Variable* _y, Variable* thresh, Variable* trigConst, Variable* linConst, Variable* massConstant, bool upper)
: ThrustPdfFunctor(0, n)
{
registerObservable(_x);
registerObservable(_y);
std::vector<unsigned int> pindices;
pindices.push_back(registerParameter(thresh));
pindices.push_back(registerParameter(trigConst));
pindices.push_back(registerParameter(linConst));
pindices.push_back(registerParameter(massConstant));
if (upper) cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_VerySpecialEpisodeTrigThresholdUpper, sizeof(void*));
else cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_VerySpecialEpisodeTrigThresholdLower, sizeof(void*));
initialise(pindices);
}
|
c0d87791d91d64dc4b7a150d89a58dcbf246e066.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* Matrix transpose with Cuda
* Host code.
* This example transposes arbitrary-size matrices. It compares a naive
* transpose kernel that suffers from non-coalesced writes, to an optimized
* transpose with fully coalesced memory access and no bank conflicts. On
* a G80 GPU, the optimized transpose can be more than 10x faster for large
* matrices.
*/
#include <prof.cu>
// Utility and system includes
#include <shrUtils.h>
#include <cutil_inline.h>
// includes, kernels
#include <transpose_kernel.cu>
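// Illustrative sketch only (not part of the original sample): the benchmarked
// kernels (transpose_naive, transpose) are defined in transpose_kernel.cu,
// which is not shown here. These hypothetical *_sketch kernels just show the
// idea described above -- the naive version has consecutive threads storing
// `height` elements apart (non-coalesced), while the tiled version stages a
// block in shared memory, padded by one column to avoid bank conflicts, so
// both its loads and stores are coalesced.
#define SKETCH_TILE 16
__global__ void transpose_naive_sketch(float* odata, float* idata,
                                       int width, int height)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height)
        odata[x * height + y] = idata[y * width + x];  // strided store
}
__global__ void transpose_tiled_sketch(float* odata, float* idata,
                                       int width, int height)
{
    __shared__ float tile[SKETCH_TILE][SKETCH_TILE + 1]; // +1 pad: no bank conflicts
    unsigned int x = blockIdx.x * SKETCH_TILE + threadIdx.x;
    unsigned int y = blockIdx.y * SKETCH_TILE + threadIdx.y;
    if (x < width && y < height)
        tile[threadIdx.y][threadIdx.x] = idata[y * width + x]; // coalesced load
    __syncthreads();
    x = blockIdx.y * SKETCH_TILE + threadIdx.x; // transposed block origin
    y = blockIdx.x * SKETCH_TILE + threadIdx.y;
    if (x < height && y < width)
        odata[y * height + x] = tile[threadIdx.x][threadIdx.y]; // coalesced store
}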
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C" void computeGold( float* reference, float* idata,
const unsigned int size_x, const unsigned int size_y );
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
GpuProfiling::initProf();
// Start logs
shrSetLogFileName ("transpose.txt");
shrLog("%s Starting...\n\n", argv[0]);
runTest( argc, argv);
exit(0);
shrEXIT(argc, (const char**)argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// size of the matrix
#ifdef __DEVICE_EMULATION__
const unsigned int size_x = 32;
const unsigned int size_y = 128;
#else
const unsigned int size_x = 256;
const unsigned int size_y = 4096;
#endif
// size of memory required to store the matrix
const unsigned int mem_size = sizeof(float) * size_x * size_y;
unsigned int timer;
cutCreateTimer(&timer);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
// allocate host memory
float* h_idata = (float*) malloc(mem_size);
// initialize the memory
srand(15235911);
for( unsigned int i = 0; i < (size_x * size_y); ++i)
{
h_idata[i] = (float) i; // rand();
}
// allocate device memory
float* d_idata;
float* d_odata;
cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_odata, mem_size));
// copy host memory to device
cutilSafeCall( hipMemcpy( d_idata, h_idata, mem_size,
hipMemcpyHostToDevice) );
// setup execution parameters
dim3 grid(size_x / BLOCK_DIM, size_y / BLOCK_DIM, 1);
dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
// warmup so we don't time CUDA startup
GpuProfiling::prepareProfiling( grid, threads );
hipLaunchKernelGGL(( transpose_naive), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y);
GpuProfiling::addResults("transpose_naive");
GpuProfiling::prepareProfiling( grid, threads );
hipLaunchKernelGGL(( transpose), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y);
GpuProfiling::addResults("transpose");
// synchronize here, so we make sure that we don't count any time from the asynchronous kernel launches.
hipDeviceSynchronize();
// execute the naive kernel numIterations times
int numIterations = 100;
shrLog("Transposing a %d by %d matrix of floats...\n", size_x, size_y);
for (int i = -1; i < numIterations; ++i)
{
if (i == 0)
{
hipDeviceSynchronize();
cutStartTimer(timer);
}
GpuProfiling::prepareProfiling( grid, threads );
hipLaunchKernelGGL(( transpose_naive), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y);
GpuProfiling::addResults("transpose_naive");
}
hipDeviceSynchronize();
cutStopTimer(timer);
float naiveTime = 1.0e-3 * cutGetTimerValue(timer)/(double)numIterations;
// execute the optimized kernel numIterations times
for (int i = 0; i < numIterations; ++i)
{
if (i == 0)
{
hipDeviceSynchronize();
cutResetTimer(timer);
cutStartTimer(timer);
}
GpuProfiling::prepareProfiling( grid, threads );
hipLaunchKernelGGL(( transpose), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y);
GpuProfiling::addResults("transpose");
}
hipDeviceSynchronize();
cutStopTimer(timer);
float optimizedTime = 1.0e-3*cutGetTimerValue(timer) / (double)numIterations;
shrLog("Naive transpose average time: %0.3f ms\n", naiveTime / numIterations);
shrLog("Optimized transpose average time: %0.3f ms\n\n", optimizedTime / numIterations);
shrLogEx(LOGBOTH | MASTER, 0, "transpose-naive, Throughput = %.4f, Time = %.5f, Size = %u, NumDevsUsed = %u, Workgroup = %u\n",
1.0e-9 * (double)size_x * size_y / naiveTime, naiveTime, size_x * size_y, 1, 256);
shrLogEx(LOGBOTH | MASTER, 0, "transpose-optimized, Throughput = %.4f, Time = %.5f, Size = %u, NumDevsUsed = %u, Workgroup = %u\n",
1.0e-9 * (double)size_x * size_y / optimizedTime, optimizedTime, size_x * size_y, 1, 256);
// check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
// copy result from device to host
float* h_odata = (float*) malloc(mem_size);
cutilSafeCall( hipMemcpy( h_odata, d_odata, mem_size,
hipMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_idata, size_x, size_y);
// check result
CUTBoolean res = cutComparef( reference, h_odata, size_x * size_y);
shrLog("\n%s\n", (1 == res) ? "PASSED" : "FAILED");
GpuProfiling::printResults();
// cleanup memory
free(h_idata);
free(h_odata);
free( reference);
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(d_odata));
cutilCheckError( cutDeleteTimer(timer));
hipDeviceReset();
}
| c0d87791d91d64dc4b7a150d89a58dcbf246e066.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* Matrix transpose with Cuda
* Host code.
* This example transposes arbitrary-size matrices. It compares a naive
* transpose kernel that suffers from non-coalesced writes, to an optimized
* transpose with fully coalesced memory access and no bank conflicts. On
* a G80 GPU, the optimized transpose can be more than 10x faster for large
* matrices.
*/
#include <prof.cu>
// Utility and system includes
#include <shrUtils.h>
#include <cutil_inline.h>
// includes, kernels
#include <transpose_kernel.cu>
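// Illustrative sketch only (not part of the original sample): the benchmarked
// kernels (transpose_naive, transpose) are defined in transpose_kernel.cu,
// which is not shown here. These hypothetical *_sketch kernels just show the
// idea described above -- the naive version has consecutive threads storing
// `height` elements apart (non-coalesced), while the tiled version stages a
// block in shared memory, padded by one column to avoid bank conflicts, so
// both its loads and stores are coalesced.
#define SKETCH_TILE 16
__global__ void transpose_naive_sketch(float* odata, float* idata,
                                       int width, int height)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height)
        odata[x * height + y] = idata[y * width + x];  // strided store
}
__global__ void transpose_tiled_sketch(float* odata, float* idata,
                                       int width, int height)
{
    __shared__ float tile[SKETCH_TILE][SKETCH_TILE + 1]; // +1 pad: no bank conflicts
    unsigned int x = blockIdx.x * SKETCH_TILE + threadIdx.x;
    unsigned int y = blockIdx.y * SKETCH_TILE + threadIdx.y;
    if (x < width && y < height)
        tile[threadIdx.y][threadIdx.x] = idata[y * width + x]; // coalesced load
    __syncthreads();
    x = blockIdx.y * SKETCH_TILE + threadIdx.x; // transposed block origin
    y = blockIdx.x * SKETCH_TILE + threadIdx.y;
    if (x < height && y < width)
        odata[y * height + x] = tile[threadIdx.x][threadIdx.y]; // coalesced store
}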
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C" void computeGold( float* reference, float* idata,
const unsigned int size_x, const unsigned int size_y );
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
GpuProfiling::initProf();
// Start logs
shrSetLogFileName ("transpose.txt");
shrLog("%s Starting...\n\n", argv[0]);
runTest( argc, argv);
exit(0);
shrEXIT(argc, (const char**)argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// size of the matrix
#ifdef __DEVICE_EMULATION__
const unsigned int size_x = 32;
const unsigned int size_y = 128;
#else
const unsigned int size_x = 256;
const unsigned int size_y = 4096;
#endif
// size of memory required to store the matrix
const unsigned int mem_size = sizeof(float) * size_x * size_y;
unsigned int timer;
cutCreateTimer(&timer);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
// allocate host memory
float* h_idata = (float*) malloc(mem_size);
// initialize the memory
srand(15235911);
for( unsigned int i = 0; i < (size_x * size_y); ++i)
{
h_idata[i] = (float) i; // rand();
}
// allocate device memory
float* d_idata;
float* d_odata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_odata, mem_size));
// copy host memory to device
cutilSafeCall( cudaMemcpy( d_idata, h_idata, mem_size,
cudaMemcpyHostToDevice) );
// setup execution parameters
dim3 grid(size_x / BLOCK_DIM, size_y / BLOCK_DIM, 1);
dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
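// For reference: assuming the SDK sample's usual BLOCK_DIM of 16 (defined in
// transpose_kernel.cu), the 256 x 4096 matrix gives a 16 x 256 grid of
// 16 x 16 thread blocks -- one block per tile.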
// warmup so we don't time CUDA startup
GpuProfiling::prepareProfiling( grid, threads );
transpose_naive<<< grid, threads >>>(d_odata, d_idata, size_x, size_y);
GpuProfiling::addResults("transpose_naive");
GpuProfiling::prepareProfiling( grid, threads );
transpose<<< grid, threads >>>(d_odata, d_idata, size_x, size_y);
GpuProfiling::addResults("transpose");
// synchronize here, so we make sure that we don't count any time from the asynchronous kernel launches.
cudaThreadSynchronize();
// execute the naive kernel numIterations times
int numIterations = 100;
shrLog("Transposing a %d by %d matrix of floats...\n", size_x, size_y);
for (int i = -1; i < numIterations; ++i)
{
if (i == 0)
{
cudaThreadSynchronize();
cutStartTimer(timer);
}
GpuProfiling::prepareProfiling( grid, threads );
transpose_naive<<< grid, threads >>>(d_odata, d_idata, size_x, size_y);
GpuProfiling::addResults("transpose_naive");
}
cudaThreadSynchronize();
cutStopTimer(timer);
float naiveTime = 1.0e-3 * cutGetTimerValue(timer)/(double)numIterations;
// execute the optimized kernel numIterations times
for (int i = 0; i < numIterations; ++i)
{
if (i == 0)
{
cudaThreadSynchronize();
cutResetTimer(timer);
cutStartTimer(timer);
}
GpuProfiling::prepareProfiling( grid, threads );
transpose<<< grid, threads >>>(d_odata, d_idata, size_x, size_y);
GpuProfiling::addResults("transpose");
}
cudaThreadSynchronize();
cutStopTimer(timer);
float optimizedTime = 1.0e-3*cutGetTimerValue(timer) / (double)numIterations;
shrLog("Naive transpose average time: %0.3f ms\n", naiveTime / numIterations);
shrLog("Optimized transpose average time: %0.3f ms\n\n", optimizedTime / numIterations);
shrLogEx(LOGBOTH | MASTER, 0, "transpose-naive, Throughput = %.4f, Time = %.5f, Size = %u, NumDevsUsed = %u, Workgroup = %u\n",
1.0e-9 * (double)size_x * size_y / naiveTime, naiveTime, size_x * size_y, 1, 256);
shrLogEx(LOGBOTH | MASTER, 0, "transpose-optimized, Throughput = %.4f, Time = %.5f, Size = %u, NumDevsUsed = %u, Workgroup = %u\n",
1.0e-9 * (double)size_x * size_y / optimizedTime, optimizedTime, size_x * size_y, 1, 256);
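// Note: the Throughput figures above are 1e9 * elements / second
// (GigaElements/s), not bytes/s; to estimate effective memory bandwidth,
// multiply by sizeof(float) and by 2 (one read plus one write per element).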
// check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
// copy result from device to host
float* h_odata = (float*) malloc(mem_size);
cutilSafeCall( cudaMemcpy( h_odata, d_odata, mem_size,
cudaMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_idata, size_x, size_y);
// check result
CUTBoolean res = cutComparef( reference, h_odata, size_x * size_y);
shrLog("\n%s\n", (1 == res) ? "PASSED" : "FAILED");
GpuProfiling::printResults();
// cleanup memory
free(h_idata);
free(h_odata);
free( reference);
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata));
cutilCheckError( cutDeleteTimer(timer));
cudaThreadExit();
}
|
ab7b38c6e36deb3a3c1ac0e33935200f308ad94e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "reducedMath.h"
#include <iostream>
using nvinfer1::rt::reduced_divisor;
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void gridAnchorKernel(
const GridAnchorParameters param,
const int numAspectRatios,
reduced_divisor divObj,
const float* widths,
const float* heights,
float* outputData
)
{
// output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4)
const int dim = param.H * param.W * numAspectRatios;
/*
* Parameters used to calculate the bounding box coordinates back to input image scale
* Normally we calculate the anchorStride = image_input_size (in pixel) / feature_map_size
* Here we do not use image_input_size for the moment
* Instead we use 1.0
* The coordinates calculated are scaled by the input image size.
* Most of the coordinates will be in a range of [0, 1], except for the bounding box coordinates going outside of the image
 * Every coordinate maps back to pixel coordinates in the input image when multiplied by image_input_size
 * Here we implicitly assume the input image and feature map are square
*/
float anchorStride = (1.0 / param.H);
float anchorOffset = 0.5 * anchorStride;
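// Illustration: for a square 19 x 19 feature map (H == W == 19),
// anchorStride = 1/19 ~= 0.0526 and anchorOffset ~= 0.0263, so cell (h, w)
// produces anchors centered at ((w + 0.5) / 19, (h + 0.5) / 19).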
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= dim)
return;
int arId, currIndex;
divObj.divmod(tid, currIndex, arId);
const int w = currIndex % param.W;
const int h = currIndex / param.W;
// Center coordinates
float yC = h * anchorStride + anchorOffset;
float xC = w * anchorStride + anchorOffset;
// x_min, y_min
float xMin = xC - 0.5 * widths[arId];
float yMin = yC - 0.5 * heights[arId];
// x_max, y_max
float xMax = xC + 0.5 * widths[arId];
float yMax = yC + 0.5 * heights[arId];
outputData[tid * 4] = xMin;
outputData[tid * 4 + 1] = yMin;
outputData[tid * 4 + 2] = xMax;
outputData[tid * 4 + 3] = yMax;
// Remember to move the output cursor
float* output = outputData + dim * 4;
// Simply copying the variance
output[tid * 4] = param.variance[0];
output[tid * 4 + 1] = param.variance[1];
output[tid * 4 + 2] = param.variance[2];
output[tid * 4 + 3] = param.variance[3];
}
pluginStatus_t anchorGridInference(
hipStream_t stream,
const GridAnchorParameters param,
const int numAspectRatios,
const void* widths,
const void* heights,
void* outputData
)
{
const int dim = param.H * param.W * numAspectRatios;
reduced_divisor divObj(numAspectRatios);
if (dim > 5120)
{
const int BS = 128;
const int GS = (dim + BS - 1) / BS;
hipLaunchKernelGGL(( gridAnchorKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, numAspectRatios, divObj,
(const float*) widths, (const float*) heights,
(float*) outputData);
}
else
{
const int BS = 32;
const int GS = (dim + BS - 1) / BS;
hipLaunchKernelGGL(( gridAnchorKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, numAspectRatios, divObj,
(const float*) widths, (const float*) heights,
(float*) outputData);
}
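// GS is a ceiling division, GS = ceil(dim / BS). For example, dim = 10000
// exceeds 5120, so BS = 128 and GS = (10000 + 127) / 128 = 79 blocks;
// 79 * 128 = 10112 threads launch, and the 112 beyond dim exit early
// via the tid >= dim check in the kernel.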
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
| ab7b38c6e36deb3a3c1ac0e33935200f308ad94e.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "reducedMath.h"
#include <iostream>
using nvinfer1::rt::reduced_divisor;
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void gridAnchorKernel(
const GridAnchorParameters param,
const int numAspectRatios,
reduced_divisor divObj,
const float* widths,
const float* heights,
float* outputData
)
{
// output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4)
const int dim = param.H * param.W * numAspectRatios;
/*
* Parameters used to calculate the bounding box coordinates back to input image scale
* Normally we calculate the anchorStride = image_input_size (in pixel) / feature_map_size
* Here we do not use image_input_size for the moment
* Instead we use 1.0
* The coordinates calculated are scaled by the input image size.
* Most of the coordinates will be in a range of [0, 1], except for the bounding box coordinates going outside of the image
 * Every coordinate maps back to pixel coordinates in the input image when multiplied by image_input_size
 * Here we implicitly assume the input image and feature map are square
*/
float anchorStride = (1.0 / param.H);
float anchorOffset = 0.5 * anchorStride;
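// Illustration: for a square 19 x 19 feature map (H == W == 19),
// anchorStride = 1/19 ~= 0.0526 and anchorOffset ~= 0.0263, so cell (h, w)
// produces anchors centered at ((w + 0.5) / 19, (h + 0.5) / 19).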
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= dim)
return;
int arId, currIndex;
divObj.divmod(tid, currIndex, arId);
const int w = currIndex % param.W;
const int h = currIndex / param.W;
// Center coordinates
float yC = h * anchorStride + anchorOffset;
float xC = w * anchorStride + anchorOffset;
// x_min, y_min
float xMin = xC - 0.5 * widths[arId];
float yMin = yC - 0.5 * heights[arId];
// x_max, y_max
float xMax = xC + 0.5 * widths[arId];
float yMax = yC + 0.5 * heights[arId];
outputData[tid * 4] = xMin;
outputData[tid * 4 + 1] = yMin;
outputData[tid * 4 + 2] = xMax;
outputData[tid * 4 + 3] = yMax;
// Remember to move the output cursor
float* output = outputData + dim * 4;
// Simply copying the variance
output[tid * 4] = param.variance[0];
output[tid * 4 + 1] = param.variance[1];
output[tid * 4 + 2] = param.variance[2];
output[tid * 4 + 3] = param.variance[3];
}
pluginStatus_t anchorGridInference(
cudaStream_t stream,
const GridAnchorParameters param,
const int numAspectRatios,
const void* widths,
const void* heights,
void* outputData
)
{
const int dim = param.H * param.W * numAspectRatios;
reduced_divisor divObj(numAspectRatios);
if (dim > 5120)
{
const int BS = 128;
const int GS = (dim + BS - 1) / BS;
gridAnchorKernel<BS><<<GS, BS, 0, stream>>>(param, numAspectRatios, divObj,
(const float*) widths, (const float*) heights,
(float*) outputData);
}
else
{
const int BS = 32;
const int GS = (dim + BS - 1) / BS;
gridAnchorKernel<BS><<<GS, BS, 0, stream>>>(param, numAspectRatios, divObj,
(const float*) widths, (const float*) heights,
(float*) outputData);
}
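// GS is a ceiling division, GS = ceil(dim / BS). For example, dim = 10000
// exceeds 5120, so BS = 128 and GS = (10000 + 127) / 128 = 79 blocks;
// 79 * 128 = 10112 threads launch, and the 112 beyond dim exit early
// via the tid >= dim check in the kernel.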
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
|
ac04f991684bbb2c7414238f68d31a50cea5d785.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <global_thread_handle.h>
#include <iostream>
#include <memory>
#include <error.h>
#include <limits>
#include <vector>
#include <cassert>
#include <amgx_timer.h>
#include <algorithm>
#include <iomanip>
#if defined(_WIN32)
#include <stddef.h>
#else
#include <inttypes.h>
#endif
#define PAGE_SIZE 4096
// threshold to consider using the pre-allocated pool
// (requests of 100 MB or more bypass the pool and go straight to hipHostMalloc)
#define PINNED_POOL_SIZE_THRESHOLD (100*1024*1024)
// 100 MB pinned pool allocated up front on the host
#define PINNED_POOL_SIZE ( 100 * 1024 * 1024)
// enable this macro if you want memory info printed
// #define AMGX_PRINT_MEMORY_INFO 1
// enable this macro to print the call stack for each malloc/free (output is extensive)
// #define AMGX_PRINT_MALLOC_CALL_STACK 1
// #define MULTIGPU 1
_thread_id getCurrentThreadId()
{
#ifdef WIN32
return GetCurrentThreadId();
#else
return pthread_self();
#endif
}
namespace amgx
{
namespace memory
{
MemoryPool::MemoryPool(size_t max_block_size, size_t page_size, size_t max_size)
: m_size(0)
, m_max_size(max_size)
, m_max_block_size(max_block_size)
, m_page_size(page_size)
, m_free_mem(0)
, m_used_blocks()
, m_free_blocks()
, m_recently_merged(false)
{
//initializeCriticalSection(&m_mutex2);
}
MemoryPool::~MemoryPool()
{
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
{
#endif
if ( !m_used_blocks.empty() )
{
std::cerr << "!!! detected some memory leaks in the code: trying to free non-empty temporary device pool !!!" << std::endl;
for ( MemoryBlockListIterator it = m_used_blocks.begin() ; it != m_used_blocks.end() ; ++it )
{
std::cerr << "ptr: " << std::setw(18) << (void *) get_block_begin(it) << " size: " << get_block_size(it) << std::endl;
}
}
//deleteCriticalSection(&m_mutex2);
#ifdef MULTIGPU
}
#endif
}
void MemoryPool::add_memory(void *ptr, size_t size, bool managed)
{
if (m_max_size != 0 && managed && (size + m_size > m_max_size))
{
FatalError("Memory pool limit is reached", AMGX_ERR_NO_MEMORY);
}
m_mutex2.lock();
m_owned_ptrs.push_back(MemoryBlock(ptr, size, true, managed));
char *aligned_ptr = (char *) ptr;
if ( (size_t) aligned_ptr % m_page_size )
{
aligned_ptr = (char *) ((((size_t) aligned_ptr + m_page_size - 1) / m_page_size) * m_page_size);
}
size_t free_size = size - (aligned_ptr - (char *) ptr);
#ifdef AMGX_PRINT_MEMORY_INFO
// std::cerr << "INFO: Adding memory block " << (void*) aligned_ptr << " " << free_size << std::endl;
#endif
m_free_blocks.push_back(MemoryBlock(aligned_ptr, free_size, true, managed));
m_size += free_size;
m_free_mem += free_size;
m_mutex2.unlock();
}
void *MemoryPool::allocate(size_t size, size_t &allocated_size)
{
m_mutex2.lock();
void *ptr = NULL;
// Fail if the size is 0.
if ( size == 0 )
{
FatalError("Allocating memory buffer of size 0!!!", AMGX_ERR_BAD_PARAMETERS);
}
// The memory size we are actually going to allocate.
size_t aligned_size = m_page_size * ((size + m_page_size - 1) / m_page_size);
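// e.g. with the default 4 KB page size, a 4097-byte request is padded to
// 8192 bytes (two pages); an exact multiple of the page size is unchanged.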
// The chosen block (if any).
MemoryBlockListIterator best_it = m_free_blocks.end();
// The best cost (wasted amount of memory).
size_t best_cost = std::numeric_limits<size_t>::max();
// The address of the first correctly aligned region we're interested in.
char *best_aligned_ptr = NULL;
// Look for a large enough block.
for ( MemoryBlockListIterator it = m_free_blocks.begin() ; it != m_free_blocks.end() ; ++it )
{
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
#endif
{
std::cerr << "INFO: [block " << std::setw(18) << (void *) get_block_begin(it)
<< " " << std::setw(12) << get_block_size(it) << std::endl;
}
#endif
// Get an aligned pointer.
char *aligned_ptr = get_block_begin(it);
// Make sure alignments are fine. It shouldn't be needed but it's actually cheap to test.
if ( (size_t) aligned_ptr & (m_page_size - 1) )
{
FatalError("INTERNAL ERROR: Invalid alignment!!!", AMGX_ERR_UNKNOWN);
}
// If the pointer fits in that block, just keep it.
if ( aligned_size > get_block_size(it) )
{
continue;
}
// The cost.
size_t cost = get_block_size(it) - aligned_size;
// If the cost is better, keep it.
if ( cost < best_cost )
{
best_it = it;
best_cost = cost;
best_aligned_ptr = aligned_ptr;
}
}
// No block found: fall back to regular malloc, handled by the caller.
if ( best_it == m_free_blocks.end() )
{
allocated_size = 0;
m_mutex2.unlock();
return ptr;
}
// Our allocation starts at aligned_ptr.
ptr = best_aligned_ptr;
// Allocated size.
allocated_size = aligned_size;
// Store the used block.
MemoryBlock used_block(best_aligned_ptr, aligned_size, is_block_first(best_it));
m_used_blocks.push_back(used_block);
// Update statistics.
m_free_mem -= aligned_size;
// We store the pointer to the beginning of the block.
char *block_begin = get_block_begin(best_it);
// ... and its size.
size_t block_size = get_block_size(best_it);
// We use all the block. Simply remove it.
if ( best_aligned_ptr == block_begin && aligned_size == block_size )
{
m_free_blocks.erase(best_it);
}
else
{
set_block_begin(best_it, best_aligned_ptr + aligned_size);
set_block_size (best_it, block_size - aligned_size);
best_it->m_first = false;
}
m_mutex2.unlock();
// Pool allocation succeeded; return the pointer.
return ptr;
}
void MemoryPool::free(void *ptr, size_t &freed_size)
{
m_mutex2.lock();
// Find the element to remove.
MemoryBlockListIterator it = m_used_blocks.begin();
for ( ; it != m_used_blocks.end() ; ++it )
if ( get_block_begin(it) == ptr )
{
break;
}
// Sanity check.
if ( it == m_used_blocks.end() )
{
FatalError("INTERNAL ERROR: Invalid iterator!!!", AMGX_ERR_UNKNOWN);
}
// We keep the pointers sorted. So find where to insert the new block.
MemoryBlockListIterator insert_it = m_free_blocks.begin();
for ( ; insert_it != m_free_blocks.end() ; ++insert_it )
{
// Same pointer in used and free... That's surely a bug.
if ( get_block_begin(insert_it) == get_block_begin(it) )
{
FatalError("INTERNAL ERROR: Invalid memory block iterator!!! Free was called twice on same pointer.", AMGX_ERR_UNKNOWN);
}
if ( get_block_begin(insert_it) > get_block_begin(it) )
{
break;
}
}
m_free_blocks.insert(insert_it, *it);
// We merge contiguous blocks.
MemoryBlockListIterator first = m_free_blocks.begin();
MemoryBlockListIterator last = m_free_blocks.begin();
char *last_ptr = get_block_begin(first) + get_block_size(first);
size_t merged_size = get_block_size(first);
int num_merged_blocks = 0;
for ( ++last ; last != m_free_blocks.end() ; ++last )
{
if ( last_ptr != get_block_begin(last) || is_block_first(last) ) // We won't merge those two.
{
if ( num_merged_blocks != 0 ) // We have found the end of the block.
{
break;
}
// We have found nothing to merge... Shift the window.
first = last;
last_ptr = get_block_begin(first) + get_block_size(first);
merged_size = get_block_size(first);
}
else
{
last_ptr = get_block_begin(last) + get_block_size(last);
merged_size += get_block_size(last);
num_merged_blocks++;
}
}
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
#endif
{
std::cerr << "INFO: Merging " << num_merged_blocks << " blocks" << std::endl;
}
#endif
if ( num_merged_blocks != 0 ) // Do the merge.
{
set_block_size(first, merged_size);
first++;
m_free_blocks.erase(first, last);
}
// Remove the used block and update statistics.
m_free_mem += get_block_size(it);
m_used_blocks.erase(it);
//m_recently_merged = true;
m_mutex2.unlock();
}
void MemoryPool::free_all()
{
m_mutex2.lock();
m_used_blocks.clear();
m_free_blocks.clear();
std::vector<MemoryBlock> owned_ptrs = m_owned_ptrs;
m_owned_ptrs.clear();
for ( size_t i = 0 ; i < owned_ptrs.size() ; ++i )
{
add_memory(owned_ptrs[i].m_begin, owned_ptrs[i].m_size, owned_ptrs[i].m_managed);
}
m_free_mem = m_size;
m_mutex2.unlock();
}
bool MemoryPool::is_allocated(void *ptr)
{
m_mutex2.lock();
for ( MemoryBlockListConstIterator it = m_used_blocks.begin() ; it != m_used_blocks.end() ; ++it )
if ( it->m_begin == ptr )
{
m_mutex2.unlock();
return true;
}
m_mutex2.unlock();
return false;
}
PinnedMemoryPool::PinnedMemoryPool()
: MemoryPool(PINNED_POOL_SIZE_THRESHOLD, 4096, 0)
{
void *ptr = NULL;
::hipHostMalloc(&ptr, PINNED_POOL_SIZE);
if ( ptr == NULL )
{
FatalError("Cannot allocate pinned memory", AMGX_ERR_NO_MEMORY);
}
add_memory(ptr, PINNED_POOL_SIZE);
}
PinnedMemoryPool::~PinnedMemoryPool()
{
for ( size_t i = 0 ; i < m_owned_ptrs.size() ; ++i )
if (m_owned_ptrs[i].m_managed)
{
::hipHostFree(m_owned_ptrs[i].m_begin);
}
m_owned_ptrs.clear();
}
DeviceMemoryPool::DeviceMemoryPool(size_t size,
size_t max_block_size,
size_t max_size)
: MemoryPool(max_block_size, 4096, max_size)
{
if (max_size > 0 && size > max_size)
{
FatalError("Initial size for the memory pool specified is more than memory limit", AMGX_ERR_NO_MEMORY);
}
void *ptr = NULL;
::hipMalloc(&ptr, size);
if ( ptr == NULL )
{
FatalError("Cannot allocate device memory", AMGX_ERR_NO_MEMORY);
}
add_memory(ptr, size);
}
void DeviceMemoryPool::expandPool(size_t size,
size_t max_block_size)
{
if (this->m_max_size > 0 && (size + this->m_size) > this->m_max_size)
{
FatalError("Pool memory size is exceeded.", AMGX_ERR_NO_MEMORY);
}
void *ptr = NULL;
::hipMalloc(&ptr, size);
if ( ptr == NULL )
{
FatalError("Cannot allocate device memory", AMGX_ERR_NO_MEMORY);
}
add_memory(ptr, size);
}
DeviceMemoryPool::~DeviceMemoryPool()
{
for ( size_t i = 0 ; i < m_owned_ptrs.size() ; ++i )
if (m_owned_ptrs[i].m_managed)
{
::hipFree(m_owned_ptrs[i].m_begin);
}
m_owned_ptrs.clear();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct MemoryManager
{
// Get the global instance.
static MemoryManager &get_instance()
{
static MemoryManager s_instance;
return s_instance;
}
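// (Meyers singleton: the function-local static above is constructed on
// first use, and C++11 guarantees that construction is thread-safe.)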
// Ctor.
MemoryManager()
: m_main_pinned_pool(NULL)
, m_main_device_pool(NULL)
, m_use_async_free(false)
, m_use_device_pool(false)
, m_alloc_scaling_factor(0)
, m_alloc_scaling_threshold(16 * 1024 * 1024)
{
//initializeCriticalSection(&m_mutex);
}
// Dtor.
~MemoryManager()
{
//deleteCriticalSection(&m_mutex);
}
// Synchronize a device pool.
void sync_pinned_pool(PinnedMemoryPool *pool);
void sync_device_pool(DeviceMemoryPool *pool);
// Scale a memory size.
size_t scale(size_t size) const
{
size_t new_size = size;
if ( size >= m_alloc_scaling_threshold )
{
new_size += m_alloc_scaling_factor * (size / 100);
}
return new_size;
}
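// Example: with m_alloc_scaling_factor == 10 (i.e. 10%) and the default
// 16 MB threshold, a 64 MB request scales to 64 MB + 6.4 MB; requests
// below the threshold pass through unchanged (the default factor is 0).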
// Mutex to make functions thread-safe.
std::recursive_mutex m_mutex;
// Streams.
typedef std::map<_thread_id, hipStream_t> StreamMap;
StreamMap m_thread_stream;
hipStream_t m_main_stream;
// Items to free (async free).
// typedef std::map<_thread_id, std::vector<void*> > AsyncFreeMap;
// AsyncFreeMap m_thread_free;
// std::vector<void*> m_main_free;
// Pinned pools.
typedef std::map<_thread_id, PinnedMemoryPool *> PinnedPoolMap;
PinnedPoolMap m_thread_pinned_pools;
PinnedMemoryPool *m_main_pinned_pool;
// Device pools.
typedef std::map<_thread_id, DeviceMemoryPool *> DevicePoolMap;
DevicePoolMap m_thread_device_pools;
DeviceMemoryPool *m_main_device_pool;
// Registered memory blocks.
typedef std::vector<std::pair<void *, void *> > RegisteredBlocks;
typedef std::map<_thread_id, RegisteredBlocks> RegisteredBlocksMap;
RegisteredBlocksMap m_thread_registered;
RegisteredBlocks m_main_registered;
// We keep a list of allocations that go through hipMalloc.
typedef std::map<void *, size_t> MemoryBlockMap;
MemoryBlockMap m_allocated_blocks;
// whether we want to use async free/wait or regular free.
bool m_use_async_free;
// whether we want to use device pool or simply do regular malloc.
bool m_use_device_pool;
// Scaling factor.
size_t m_alloc_scaling_factor;
// Scaling threshold.
size_t m_alloc_scaling_threshold;
};
void MemoryManager::sync_pinned_pool(PinnedMemoryPool *pool)
{
MemoryPool *mem_pool = (MemoryPool *) pool;
assert(mem_pool);
MemoryPool *main_pool = (MemoryPool *) m_main_pinned_pool;
main_pool->m_used_blocks.insert(main_pool->m_used_blocks.end(),
mem_pool->get_used_begin(),
mem_pool->get_used_end());
mem_pool->free_all();
}
void MemoryManager::sync_device_pool(DeviceMemoryPool *pool)
{
MemoryPool *mem_pool = (MemoryPool *) pool;
assert(mem_pool);
MemoryPool *main_pool = (MemoryPool *) m_main_device_pool;
main_pool->m_used_blocks.insert(main_pool->m_used_blocks.end(),
mem_pool->get_used_begin(),
mem_pool->get_used_end());
mem_pool->free_all();
}
bool hasPinnedMemoryPool()
{
MemoryManager &manager = MemoryManager::get_instance();
return manager.m_main_pinned_pool != NULL;
}
bool hasDeviceMemoryPool()
{
MemoryManager &manager = MemoryManager::get_instance();
return manager.m_main_device_pool != NULL;
}
void setPinnedMemoryPool(PinnedMemoryPool *pool)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_main_pinned_pool = pool;
manager.m_mutex.unlock();
}
void setDeviceMemoryPool(DeviceMemoryPool *pool)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_main_device_pool = pool;
manager.m_mutex.unlock();
}
void setPinnedMemoryPool(_thread_id thread_id, PinnedMemoryPool *pool)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_thread_pinned_pools[thread_id] = pool;
manager.m_mutex.unlock();
}
void setDeviceMemoryPool(_thread_id thread_id, DeviceMemoryPool *pool)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_thread_device_pools[thread_id] = pool;
manager.m_mutex.unlock();
}
void destroyPinnedMemoryPool()
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
delete manager.m_main_pinned_pool;
manager.m_main_pinned_pool = NULL;
manager.m_mutex.unlock();
}
void destroyDeviceMemoryPool()
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
delete manager.m_main_device_pool;
manager.m_main_device_pool = NULL;
manager.m_mutex.unlock();
}
void destroyPinnedMemoryPool(_thread_id thread_id)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.find(thread_id);
if ( it == manager.m_thread_pinned_pools.end() )
{
FatalError("INTERNAL ERROR: Invalid pinned memory pool", AMGX_ERR_UNKNOWN);
}
delete it->second;
manager.m_thread_pinned_pools.erase(it);
manager.m_mutex.unlock();
}
void destroyDeviceMemoryPool(_thread_id thread_id)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
MemoryManager::DevicePoolMap::iterator it = manager.m_thread_device_pools.find(thread_id);
if ( it == manager.m_thread_device_pools.end() )
{
FatalError("INTERNAL ERROR: Invalid device memory pool", AMGX_ERR_UNKNOWN);
}
delete it->second;
manager.m_thread_device_pools.erase(it);
manager.m_mutex.unlock();
}
void destroyAllPinnedMemoryPools()
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.begin();
for ( ; it != manager.m_thread_pinned_pools.end() ; ++it )
{
delete it->second;
manager.m_thread_pinned_pools.erase(it);
}
destroyPinnedMemoryPool();
manager.m_mutex.unlock();
}
void destroyAllDeviceMemoryPools()
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
MemoryManager::DevicePoolMap::iterator it = manager.m_thread_device_pools.begin();
for ( ; it != manager.m_thread_device_pools.end() ; ++it )
{
delete it->second;
manager.m_thread_device_pools.erase(it);
}
destroyDeviceMemoryPool();
manager.m_mutex.unlock();
}
void setAsyncFreeFlag(bool set)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_use_async_free = set;
}
void setDeviceMemoryPoolFlag(bool set)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_use_device_pool = set;
}
void setMallocScalingFactor(size_t factor)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_alloc_scaling_factor = factor;
}
void setMallocScalingThreshold(size_t threshold)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_alloc_scaling_threshold = threshold;
}
void createAsyncFreePool(_thread_id thread_id)
{
/*
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_thread_free[thread_id] = std::vector<void*>();
manager.m_mutex.unlock(); */
}
void setMainStream(hipStream_t stream)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_main_stream = stream;
}
void setStream(_thread_id thread_id, hipStream_t stream)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_thread_stream[thread_id] = stream;
manager.m_mutex.unlock();
}
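// getStream() returns the stream registered for the calling thread via
// setStream(), falling back to the main stream when none was registered.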
hipStream_t getStream()
{
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
MemoryManager::StreamMap::iterator it = manager.m_thread_stream.find(thread_id);
if ( it != manager.m_thread_stream.end() )
{
return it->second;
}
return manager.m_main_stream;
}
void hipHostRegister(void *ptr, int size)
{
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
MemoryManager::RegisteredBlocks *blocks = &manager.m_main_registered;
MemoryManager::RegisteredBlocksMap::iterator it = manager.m_thread_registered.find(thread_id);
if ( it != manager.m_thread_registered.end() )
{
blocks = &it->second;
}
bool reg = true;
for ( size_t i = 0; i < blocks->size() ; ++i )
if ( blocks->at(i).first <= ptr && blocks->at(i).second >= ptr)
{
reg = false;
break;
}
if ( reg )
{
::hipHostRegister(ptr, size, 0);
blocks->push_back(std::pair<void *, void *>(ptr, (char *)ptr + size));
}
}
hipError_t hipHostMalloc(void **ptr, size_t size)
{
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
PinnedMemoryPool *pool = manager.m_main_pinned_pool;
MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.find(thread_id);
if ( it != manager.m_thread_pinned_pools.end() )
{
pool = it->second;
}
size_t allocated_size = 0;
hipError_t error = hipSuccess;
void *new_ptr = NULL;
if ( pool != NULL && size < PINNED_POOL_SIZE_THRESHOLD )
{
new_ptr = pool->allocate(size, allocated_size);
}
if ( pool != NULL && new_ptr == NULL && size < PINNED_POOL_SIZE_THRESHOLD ) // retry the pool allocation once
{
new_ptr = pool->allocate(size, allocated_size);
}
if ( new_ptr != NULL )
{
*ptr = new_ptr;
}
else
{
//printf("calling hipHostMalloc, size = %lu\n",size);
error = ::hipHostMalloc(ptr, size);
}
return error;
}
hipError_t hipHostFree(void *ptr)
{
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
PinnedMemoryPool *pool = manager.m_main_pinned_pool;
MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.find(thread_id);
if ( it != manager.m_thread_pinned_pools.end() )
{
pool = it->second;
}
size_t freed_size = 0;
hipError_t error = hipSuccess;
if ( pool != NULL && pool->is_allocated(ptr) )
{
pool->free(ptr, freed_size);
}
else
{
//printf("calling hipHostFree\n");
error = ::hipHostFree(ptr);
}
return error;
}
hipError_t hipMalloc(void **ptr, size_t size)
{
AMGX_CPU_PROFILER("hipMalloc");
#ifdef AMGX_PRINT_MALLOC_CALL_STACK
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
#endif
{
std::cerr << "----" << std::endl;
std::cerr << "hipMalloc call stack:" << std::endl;
printStackTrace(std::cerr);
}
#endif
MemoryManager &manager = MemoryManager::get_instance();
DeviceMemoryPool *pool = manager.m_main_device_pool;
_thread_id thread_id = getCurrentThreadId();
MemoryManager::DevicePoolMap::iterator it = manager.m_thread_device_pools.find(thread_id);
if ( it != manager.m_thread_device_pools.end() )
{
pool = it->second;
}
bool use_pool = manager.m_use_device_pool;
#ifdef AMGX_PRINT_MEMORY_INFO
bool print_fallback = false;
#endif
size_t allocated_size = 0;
hipError_t error = hipSuccess;
void *new_ptr = NULL;
if ( pool != NULL /*&& size < pool->get_max_block_size()*/ && use_pool )
{
new_ptr = pool->allocate(size, allocated_size);
}
if ( new_ptr != NULL )
{
*ptr = new_ptr;
}
else
{
#ifdef AMGX_PRINT_MEMORY_INFO
print_fallback = true;
#endif
// We allocate an extra fraction here.
allocated_size = manager.scale(size);
// Round the size up to a multiple of the page size.
allocated_size = PAGE_SIZE * ((allocated_size + PAGE_SIZE - 1) / PAGE_SIZE);
error = ::hipMalloc(ptr, allocated_size);
// Very last attempt. Try without over allocation.
if ( *ptr == NULL )
{
allocated_size = size;
error = ::hipMalloc(ptr, allocated_size);
}
manager.m_mutex.lock();
manager.m_allocated_blocks[*ptr] = allocated_size;
manager.m_mutex.unlock();
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
if (rank == 0)
#endif
{
std::cerr << "INFO: Registered [block " << std::setw(18) << *ptr << " size: " << allocated_size << "]" << std::endl;
}
#endif
}
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
if (rank == 0)
#endif
{
if ( print_fallback )
{
std::cerr << "hipMalloc ";
}
else
{
std::cerr << "pool::allocate";
}
std::cerr << ";" << std::setw(18) << *ptr
<< ";" << std::setw(12) << size
<< ";" << std::setw(12) << allocated_size
<< ";" << std::setw(12) << pool->get_used_mem()
<< ";" << std::setw(12) << pool->get_free_mem();
size_t gpu_free_mem, gpu_total_mem;
hipMemGetInfo(&gpu_free_mem, &gpu_total_mem);
std::cerr << ";" << std::setw(12) << gpu_free_mem
<< ";" << std::setw(12) << gpu_total_mem;
std::cerr << std::endl;
}
#endif
return error;
}
hipError_t hipFreeAsync(void *ptr)
{
AMGX_CPU_PROFILER("hipFreeAsync");
#ifdef AMGX_PRINT_MALLOC_CALL_STACK
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
#endif
{
std::cerr << "----" << std::endl;
std::cerr << "hipFreeAsync call stack:" << std::endl;
printStackTrace(std::cerr);
}
#endif
// We accept NULL pointers and we do nothing.
if ( ptr == NULL )
{
return hipSuccess;
}
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
#ifdef AMGX_PRINT_MEMORY_INFO
bool print_async = false, print_fallback = false;
#endif
DeviceMemoryPool *pool = manager.m_main_device_pool;
size_t freed_size = 0;
hipError_t status = hipSuccess;
MemoryManager::DevicePoolMap::iterator it_pool = manager.m_thread_device_pools.find(thread_id);
if ( it_pool != manager.m_thread_device_pools.end() )
{
pool = manager.m_thread_device_pools[thread_id];
}
if ( pool != NULL && pool->is_allocated(ptr) )
{
pool->free(ptr, freed_size);
}
else if ( pool != NULL && manager.m_use_async_free )
{
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
if (rank == 0)
#endif
{
print_async = true;
std::cerr << "INFO: Async free [ptr " << std::setw(18) << ptr << "]" << std::endl;
}
#endif
MemoryManager::MemoryBlockMap::iterator ptr_it = manager.m_allocated_blocks.find(ptr);
if ( ptr_it == manager.m_allocated_blocks.end() )
{
FatalError("INTERNAL ERROR: Invalid call to hipFreeAsync", AMGX_ERR_UNKNOWN);
}
pool->add_memory(ptr, ptr_it->second);
manager.m_mutex.lock();
manager.m_allocated_blocks.erase(ptr_it);
manager.m_mutex.unlock();
}
else
{
#ifdef AMGX_PRINT_MEMORY_INFO
print_fallback = true;
#endif
status = ::hipFree(ptr);
}
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
if (rank == 0)
#endif
{
if ( print_fallback )
{
std::cerr << "hipFree ";
}
else if ( print_async )
{
std::cerr << "pool::async ";
}
else
{
std::cerr << "pool::free ";
}
std::cerr << ";" << std::setw(18) << ptr
<< ";" << std::setw(12) << freed_size
<< ";" << std::setw(12) << pool->get_used_mem()
<< ";" << std::setw(12) << pool->get_free_mem();
size_t gpu_free_mem, gpu_total_mem;
hipMemGetInfo(&gpu_free_mem, &gpu_total_mem);
std::cerr << ";" << std::setw(12) << gpu_free_mem
<< ";" << std::setw(12) << gpu_total_mem;
std::cerr << std::endl;
}
#endif
return status;
}
void cudaFreeWait()
{
}
// Join device pools
void joinPinnedPools()
{
MemoryManager &manager = MemoryManager::get_instance();
typedef MemoryManager::PinnedPoolMap::iterator Iterator;
Iterator it = manager.m_thread_pinned_pools.begin();
Iterator end = manager.m_thread_pinned_pools.end();
for ( ; it != end ; ++it )
{
manager.sync_pinned_pool(it->second);
}
}
void joinDevicePools()
{
MemoryManager &manager = MemoryManager::get_instance();
typedef MemoryManager::DevicePoolMap::iterator Iterator;
Iterator it = manager.m_thread_device_pools.begin();
Iterator end = manager.m_thread_device_pools.end();
for ( ; it != end ; ++it )
{
manager.sync_device_pool(it->second);
}
}
void printInfo()
{
//
}
void expandDeviceMemoryPool(size_t size, size_t max_block_size)
{
MemoryManager &manager = MemoryManager::get_instance();
if (manager.m_main_device_pool)
{
manager.m_main_device_pool->expandPool(size, max_block_size);
}
}
} // namespace memory
} // namespace amgx
| ac04f991684bbb2c7414238f68d31a50cea5d785.cu | /* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <global_thread_handle.h>
#include <iostream>
#include <memory>
#include <error.h>
#include <limits>
#include <vector>
#include <cassert>
#include <amgx_timer.h>
#include <algorithm>
#include <iomanip>
#if defined(_WIN32)
#include <stddef.h>
#else
#include <inttypes.h>
#endif
#define PAGE_SIZE 4096
// threshold to consider using the pre-allocated pool
// (requests of 100 MB or more bypass the pool and go straight to cudaMallocHost)
#define PINNED_POOL_SIZE_THRESHOLD (100*1024*1024)
// 100 MB pinned pool allocated up front on the host
#define PINNED_POOL_SIZE ( 100 * 1024 * 1024)
// enable this macro if you want memory info printed
// #define AMGX_PRINT_MEMORY_INFO 1
// enable this macro to print the call stack for each malloc/free (output is extensive)
// #define AMGX_PRINT_MALLOC_CALL_STACK 1
// #define MULTIGPU 1
_thread_id getCurrentThreadId()
{
#ifdef WIN32
return GetCurrentThreadId();
#else
return pthread_self();
#endif
}
namespace amgx
{
namespace memory
{
MemoryPool::MemoryPool(size_t max_block_size, size_t page_size, size_t max_size)
: m_size(0)
, m_max_size(max_size)
, m_max_block_size(max_block_size)
, m_page_size(page_size)
, m_free_mem(0)
, m_used_blocks()
, m_free_blocks()
, m_recently_merged(false)
{
//initializeCriticalSection(&m_mutex2);
}
MemoryPool::~MemoryPool()
{
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
{
#endif
if ( !m_used_blocks.empty() )
{
std::cerr << "!!! detected some memory leaks in the code: trying to free non-empty temporary device pool !!!" << std::endl;
for ( MemoryBlockListIterator it = m_used_blocks.begin() ; it != m_used_blocks.end() ; ++it )
{
std::cerr << "ptr: " << std::setw(18) << (void *) get_block_begin(it) << " size: " << get_block_size(it) << std::endl;
}
}
//deleteCriticalSection(&m_mutex2);
#ifdef MULTIGPU
}
#endif
}
void MemoryPool::add_memory(void *ptr, size_t size, bool managed)
{
if (m_max_size != 0 && managed && (size + m_size > m_max_size))
{
FatalError("Memory pool limit is reached", AMGX_ERR_NO_MEMORY);
}
m_mutex2.lock();
m_owned_ptrs.push_back(MemoryBlock(ptr, size, true, managed));
char *aligned_ptr = (char *) ptr;
if ( (size_t) aligned_ptr % m_page_size )
{
aligned_ptr = (char *) ((((size_t) aligned_ptr + m_page_size - 1) / m_page_size) * m_page_size);
}
size_t free_size = size - (aligned_ptr - (char *) ptr);
#ifdef AMGX_PRINT_MEMORY_INFO
// std::cerr << "INFO: Adding memory block " << (void*) aligned_ptr << " " << free_size << std::endl;
#endif
m_free_blocks.push_back(MemoryBlock(aligned_ptr, free_size, true, managed));
m_size += free_size;
m_free_mem += free_size;
m_mutex2.unlock();
}
void *MemoryPool::allocate(size_t size, size_t &allocated_size)
{
m_mutex2.lock();
void *ptr = NULL;
// Fail if the size is 0.
if ( size == 0 )
{
FatalError("Allocating memory buffer of size 0!!!", AMGX_ERR_BAD_PARAMETERS);
}
// The memory size we are actually going to allocate.
size_t aligned_size = m_page_size * ((size + m_page_size - 1) / m_page_size);
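// e.g. with the default 4 KB page size, a 4097-byte request is padded to
// 8192 bytes (two pages); an exact multiple of the page size is unchanged.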
// The chosen block (if any).
MemoryBlockListIterator best_it = m_free_blocks.end();
// The best cost (wasted amount of memory).
size_t best_cost = std::numeric_limits<size_t>::max();
// The address of the first correctly aligned region we're interested in.
char *best_aligned_ptr = NULL;
// Look for a large enough block.
for ( MemoryBlockListIterator it = m_free_blocks.begin() ; it != m_free_blocks.end() ; ++it )
{
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
#endif
{
std::cerr << "INFO: [block " << std::setw(18) << (void *) get_block_begin(it)
<< " " << std::setw(12) << get_block_size(it) << std::endl;
}
#endif
// Get an aligned pointer.
char *aligned_ptr = get_block_begin(it);
// Make sure alignments are fine. It shouldn't be needed but it's actually cheap to test.
if ( (size_t) aligned_ptr & (m_page_size - 1) )
{
FatalError("INTERNAL ERROR: Invalid alignment!!!", AMGX_ERR_UNKNOWN);
}
// If the pointer fits in that block, just keep it.
if ( aligned_size > get_block_size(it) )
{
continue;
}
// The cost.
size_t cost = get_block_size(it) - aligned_size;
// If the cost is better, keep it.
if ( cost < best_cost )
{
best_it = it;
best_cost = cost;
best_aligned_ptr = aligned_ptr;
}
}
// No block found: fall back to regular malloc, handled by the caller.
if ( best_it == m_free_blocks.end() )
{
allocated_size = 0;
m_mutex2.unlock();
return ptr;
}
// Our allocation starts at aligned_ptr.
ptr = best_aligned_ptr;
// Allocated size.
allocated_size = aligned_size;
// Store the used block.
MemoryBlock used_block(best_aligned_ptr, aligned_size, is_block_first(best_it));
m_used_blocks.push_back(used_block);
// Update statistics.
m_free_mem -= aligned_size;
// We store the pointer to the beginning of the block.
char *block_begin = get_block_begin(best_it);
// ... and its size.
size_t block_size = get_block_size(best_it);
// We use all the block. Simply remove it.
if ( best_aligned_ptr == block_begin && aligned_size == block_size )
{
m_free_blocks.erase(best_it);
}
else
{
set_block_begin(best_it, best_aligned_ptr + aligned_size);
set_block_size (best_it, block_size - aligned_size);
best_it->m_first = false;
}
m_mutex2.unlock();
// Pool allocation succeeded; return the pointer.
return ptr;
}
void MemoryPool::free(void *ptr, size_t &freed_size)
{
m_mutex2.lock();
// Find the element to remove.
MemoryBlockListIterator it = m_used_blocks.begin();
for ( ; it != m_used_blocks.end() ; ++it )
if ( get_block_begin(it) == ptr )
{
break;
}
// Sanity check.
if ( it == m_used_blocks.end() )
{
FatalError("INTERNAL ERROR: Invalid iterator!!!", AMGX_ERR_UNKNOWN);
}
// We keep the pointers sorted. So find where to insert the new block.
MemoryBlockListIterator insert_it = m_free_blocks.begin();
for ( ; insert_it != m_free_blocks.end() ; ++insert_it )
{
// Same pointer in used and free... That's surely a bug.
if ( get_block_begin(insert_it) == get_block_begin(it) )
{
FatalError("INTERNAL ERROR: Invalid memory block iterator!!! Free was called twice on same pointer.", AMGX_ERR_UNKNOWN);
}
if ( get_block_begin(insert_it) > get_block_begin(it) )
{
break;
}
}
m_free_blocks.insert(insert_it, *it);
// We merge contiguous blocks.
MemoryBlockListIterator first = m_free_blocks.begin();
MemoryBlockListIterator last = m_free_blocks.begin();
char *last_ptr = get_block_begin(first) + get_block_size(first);
size_t merged_size = get_block_size(first);
int num_merged_blocks = 0;
for ( ++last ; last != m_free_blocks.end() ; ++last )
{
if ( last_ptr != get_block_begin(last) || is_block_first(last) ) // We won't merge those two.
{
if ( num_merged_blocks != 0 ) // We have found the end of the block.
{
break;
}
// We have found nothing to merge... Shift the window.
first = last;
last_ptr = get_block_begin(first) + get_block_size(first);
merged_size = get_block_size(first);
}
else
{
last_ptr = get_block_begin(last) + get_block_size(last);
merged_size += get_block_size(last);
num_merged_blocks++;
}
}
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
#endif
{
std::cerr << "INFO: Merging " << num_merged_blocks << " blocks" << std::endl;
}
#endif
if ( num_merged_blocks != 0 ) // Do the merge.
{
set_block_size(first, merged_size);
first++;
m_free_blocks.erase(first, last);
}
// Remove the used block and update statistics.
m_free_mem += get_block_size(it);
m_used_blocks.erase(it);
//m_recently_merged = true;
m_mutex2.unlock();
}
void MemoryPool::free_all()
{
m_mutex2.lock();
m_used_blocks.clear();
m_free_blocks.clear();
std::vector<MemoryBlock> owned_ptrs = m_owned_ptrs;
m_owned_ptrs.clear();
for ( size_t i = 0 ; i < owned_ptrs.size() ; ++i )
{
add_memory(owned_ptrs[i].m_begin, owned_ptrs[i].m_size, owned_ptrs[i].m_managed);
}
m_free_mem = m_size;
m_mutex2.unlock();
}
bool MemoryPool::is_allocated(void *ptr)
{
m_mutex2.lock();
for ( MemoryBlockListConstIterator it = m_used_blocks.begin() ; it != m_used_blocks.end() ; ++it )
if ( it->m_begin == ptr )
{
m_mutex2.unlock();
return true;
}
m_mutex2.unlock();
return false;
}
PinnedMemoryPool::PinnedMemoryPool()
: MemoryPool(PINNED_POOL_SIZE_THRESHOLD, 4096, 0)
{
void *ptr = NULL;
::cudaMallocHost(&ptr, PINNED_POOL_SIZE);
if ( ptr == NULL )
{
FatalError("Cannot allocate pinned memory", AMGX_ERR_NO_MEMORY);
}
add_memory(ptr, PINNED_POOL_SIZE);
}
PinnedMemoryPool::~PinnedMemoryPool()
{
for ( size_t i = 0 ; i < m_owned_ptrs.size() ; ++i )
if (m_owned_ptrs[i].m_managed)
{
::cudaFreeHost(m_owned_ptrs[i].m_begin);
}
m_owned_ptrs.clear();
}
DeviceMemoryPool::DeviceMemoryPool(size_t size,
size_t max_block_size,
size_t max_size)
: MemoryPool(max_block_size, 4096, max_size)
{
if (max_size > 0 && size > max_size)
{
FatalError("Initial size for the memory pool specified is more than memory limit", AMGX_ERR_NO_MEMORY);
}
void *ptr = NULL;
::cudaMalloc(&ptr, size);
if ( ptr == NULL )
{
FatalError("Cannot allocate device memory", AMGX_ERR_NO_MEMORY);
}
add_memory(ptr, size);
}
void DeviceMemoryPool::expandPool(size_t size,
size_t max_block_size)
{
if (this->m_max_size > 0 && (size + this->m_size) > this->m_max_size)
{
FatalError("Pool memory size is exceeded.", AMGX_ERR_NO_MEMORY);
}
void *ptr = NULL;
::cudaMalloc(&ptr, size);
if ( ptr == NULL )
{
FatalError("Cannot allocate device memory", AMGX_ERR_NO_MEMORY);
}
add_memory(ptr, size);
}
DeviceMemoryPool::~DeviceMemoryPool()
{
for ( size_t i = 0 ; i < m_owned_ptrs.size() ; ++i )
if (m_owned_ptrs[i].m_managed)
{
::cudaFree(m_owned_ptrs[i].m_begin);
}
m_owned_ptrs.clear();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct MemoryManager
{
// Get the global instance.
static MemoryManager &get_instance()
{
static MemoryManager s_instance;
return s_instance;
}
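// (Meyers singleton: the function-local static above is constructed on
// first use, and C++11 guarantees that construction is thread-safe.)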
// Ctor.
MemoryManager()
: m_main_pinned_pool(NULL)
, m_main_device_pool(NULL)
, m_use_async_free(false)
, m_use_device_pool(false)
, m_alloc_scaling_factor(0)
, m_alloc_scaling_threshold(16 * 1024 * 1024)
{
//initializeCriticalSection(&m_mutex);
}
// Dtor.
~MemoryManager()
{
//deleteCriticalSection(&m_mutex);
}
// Synchronize a device pool.
void sync_pinned_pool(PinnedMemoryPool *pool);
void sync_device_pool(DeviceMemoryPool *pool);
// Scale a memory size.
size_t scale(size_t size) const
{
size_t new_size = size;
if ( size >= m_alloc_scaling_threshold )
{
new_size += m_alloc_scaling_factor * (size / 100);
}
return new_size;
}
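// Example: with m_alloc_scaling_factor == 10 (i.e. 10%) and the default
// 16 MB threshold, a 64 MB request scales to 64 MB + 6.4 MB; requests
// below the threshold pass through unchanged (the default factor is 0).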
// Mutex to make functions thread-safe.
std::recursive_mutex m_mutex;
// Streams.
typedef std::map<_thread_id, cudaStream_t> StreamMap;
StreamMap m_thread_stream;
cudaStream_t m_main_stream;
// Items to free (async free).
// typedef std::map<_thread_id, std::vector<void*> > AsyncFreeMap;
// AsyncFreeMap m_thread_free;
// std::vector<void*> m_main_free;
// Pinned pools.
typedef std::map<_thread_id, PinnedMemoryPool *> PinnedPoolMap;
PinnedPoolMap m_thread_pinned_pools;
PinnedMemoryPool *m_main_pinned_pool;
// Device pools.
typedef std::map<_thread_id, DeviceMemoryPool *> DevicePoolMap;
DevicePoolMap m_thread_device_pools;
DeviceMemoryPool *m_main_device_pool;
// Registered memory blocks.
typedef std::vector<std::pair<void *, void *> > RegisteredBlocks;
typedef std::map<_thread_id, RegisteredBlocks> RegisteredBlocksMap;
RegisteredBlocksMap m_thread_registered;
RegisteredBlocks m_main_registered;
// We keep a list of allocations that go through cudaMalloc.
typedef std::map<void *, size_t> MemoryBlockMap;
MemoryBlockMap m_allocated_blocks;
// whether we want to use async free/wait or regular free.
bool m_use_async_free;
// whether we want to use device pool or simply do regular malloc.
bool m_use_device_pool;
// Scaling factor.
size_t m_alloc_scaling_factor;
// Scaling threshold.
size_t m_alloc_scaling_threshold;
};
void MemoryManager::sync_pinned_pool(PinnedMemoryPool *pool)
{
MemoryPool *mem_pool = (MemoryPool *) pool;
assert(mem_pool);
MemoryPool *main_pool = (MemoryPool *) m_main_pinned_pool;
main_pool->m_used_blocks.insert(main_pool->m_used_blocks.end(),
mem_pool->get_used_begin(),
mem_pool->get_used_end());
mem_pool->free_all();
}
void MemoryManager::sync_device_pool(DeviceMemoryPool *pool)
{
MemoryPool *mem_pool = (MemoryPool *) pool;
assert(mem_pool);
MemoryPool *main_pool = (MemoryPool *) m_main_device_pool;
main_pool->m_used_blocks.insert(main_pool->m_used_blocks.end(),
mem_pool->get_used_begin(),
mem_pool->get_used_end());
mem_pool->free_all();
}
bool hasPinnedMemoryPool()
{
MemoryManager &manager = MemoryManager::get_instance();
return manager.m_main_pinned_pool != NULL;
}
bool hasDeviceMemoryPool()
{
MemoryManager &manager = MemoryManager::get_instance();
return manager.m_main_device_pool != NULL;
}
void setPinnedMemoryPool(PinnedMemoryPool *pool)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_main_pinned_pool = pool;
manager.m_mutex.unlock();
}
void setDeviceMemoryPool(DeviceMemoryPool *pool)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_main_device_pool = pool;
manager.m_mutex.unlock();
}
void setPinnedMemoryPool(_thread_id thread_id, PinnedMemoryPool *pool)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_thread_pinned_pools[thread_id] = pool;
manager.m_mutex.unlock();
}
void setDeviceMemoryPool(_thread_id thread_id, DeviceMemoryPool *pool)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_thread_device_pools[thread_id] = pool;
manager.m_mutex.unlock();
}
void destroyPinnedMemoryPool()
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
delete manager.m_main_pinned_pool;
manager.m_main_pinned_pool = NULL;
manager.m_mutex.unlock();
}
void destroyDeviceMemoryPool()
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
delete manager.m_main_device_pool;
manager.m_main_device_pool = NULL;
manager.m_mutex.unlock();
}
void destroyPinnedMemoryPool(_thread_id thread_id)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.find(thread_id);
if ( it == manager.m_thread_pinned_pools.end() )
{
FatalError("INTERNAL ERROR: Invalid pinned memory pool", AMGX_ERR_UNKNOWN);
}
delete it->second;
manager.m_thread_pinned_pools.erase(it);
manager.m_mutex.unlock();
}
void destroyDeviceMemoryPool(_thread_id thread_id)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
MemoryManager::DevicePoolMap::iterator it = manager.m_thread_device_pools.find(thread_id);
if ( it == manager.m_thread_device_pools.end() )
{
FatalError("INTERNAL ERROR: Invalid device memory pool", AMGX_ERR_UNKNOWN);
}
delete it->second;
manager.m_thread_device_pools.erase(it);
manager.m_mutex.unlock();
}
void destroyAllPinnedMemoryPools()
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.begin();
for ( ; it != manager.m_thread_pinned_pools.end() ; ++it )
{
delete it->second;
manager.m_thread_pinned_pools.erase(it);
}
destroyPinnedMemoryPool();
manager.m_mutex.unlock();
}
void destroyAllDeviceMemoryPools()
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
MemoryManager::DevicePoolMap::iterator it = manager.m_thread_device_pools.begin();
for ( ; it != manager.m_thread_device_pools.end() ; ++it )
{
delete it->second;
manager.m_thread_device_pools.erase(it);
}
destroyDeviceMemoryPool();
manager.m_mutex.unlock();
}
void setAsyncFreeFlag(bool set)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_use_async_free = set;
}
void setDeviceMemoryPoolFlag(bool set)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_use_device_pool = set;
}
void setMallocScalingFactor(size_t factor)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_alloc_scaling_factor = factor;
}
void setMallocScalingThreshold(size_t threshold)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_alloc_scaling_threshold = threshold;
}
void createAsyncFreePool(_thread_id thread_id)
{
/*
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_thread_free[thread_id] = std::vector<void*>();
manager.m_mutex.unlock(); */
}
void setMainStream(cudaStream_t stream)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_main_stream = stream;
}
void setStream(_thread_id thread_id, cudaStream_t stream)
{
MemoryManager &manager = MemoryManager::get_instance();
manager.m_mutex.lock();
manager.m_thread_stream[thread_id] = stream;
manager.m_mutex.unlock();
}
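// getStream() returns the stream registered for the calling thread via
// setStream(), falling back to the main stream when none was registered.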
cudaStream_t getStream()
{
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
MemoryManager::StreamMap::iterator it = manager.m_thread_stream.find(thread_id);
if ( it != manager.m_thread_stream.end() )
{
return it->second;
}
return manager.m_main_stream;
}
void cudaHostRegister(void *ptr, int size)
{
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
MemoryManager::RegisteredBlocks *blocks = &manager.m_main_registered;
MemoryManager::RegisteredBlocksMap::iterator it = manager.m_thread_registered.find(thread_id);
if ( it != manager.m_thread_registered.end() )
{
blocks = &it->second;
}
bool reg = true;
for ( size_t i = 0; i < blocks->size() ; ++i )
if ( blocks->at(i).first <= ptr && blocks->at(i).second >= ptr)
{
reg = false;
break;
}
if ( reg )
{
::cudaHostRegister(ptr, size, 0);
blocks->push_back(std::pair<void *, void *>(ptr, (char *)ptr + size));
}
}
cudaError_t cudaMallocHost(void **ptr, size_t size)
{
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
PinnedMemoryPool *pool = manager.m_main_pinned_pool;
MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.find(thread_id);
if ( it != manager.m_thread_pinned_pools.end() )
{
pool = it->second;
}
size_t allocated_size = 0;
cudaError_t error = cudaSuccess;
void *new_ptr = NULL;
if ( pool != NULL && size < PINNED_POOL_SIZE_THRESHOLD )
{
new_ptr = pool->allocate(size, allocated_size);
}
if ( pool != NULL && new_ptr == NULL && size < PINNED_POOL_SIZE_THRESHOLD ) // retry the pool allocation once
{
new_ptr = pool->allocate(size, allocated_size);
}
if ( new_ptr != NULL )
{
*ptr = new_ptr;
}
else
{
//printf("calling cudaMallocHost, size = %lu\n",size);
error = ::cudaMallocHost(ptr, size);
}
return error;
}
cudaError_t cudaFreeHost(void *ptr)
{
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
PinnedMemoryPool *pool = manager.m_main_pinned_pool;
MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.find(thread_id);
if ( it != manager.m_thread_pinned_pools.end() )
{
pool = it->second;
}
size_t freed_size = 0;
cudaError_t error = cudaSuccess;
if ( pool != NULL && pool->is_allocated(ptr) )
{
pool->free(ptr, freed_size);
}
else
{
//printf("calling cudaFreeHost\n");
error = ::cudaFreeHost(ptr);
}
return error;
}
cudaError_t cudaMalloc(void **ptr, size_t size)
{
AMGX_CPU_PROFILER("cudaMalloc");
#ifdef AMGX_PRINT_MALLOC_CALL_STACK
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
#endif
{
std::cerr << "----" << std::endl;
std::cerr << "cudaMalloc call stack:" << std::endl;
printStackTrace(std::cerr);
}
#endif
MemoryManager &manager = MemoryManager::get_instance();
DeviceMemoryPool *pool = manager.m_main_device_pool;
_thread_id thread_id = getCurrentThreadId();
MemoryManager::DevicePoolMap::iterator it = manager.m_thread_device_pools.find(thread_id);
if ( it != manager.m_thread_device_pools.end() )
{
pool = it->second;
}
bool use_pool = manager.m_use_device_pool;
#ifdef AMGX_PRINT_MEMORY_INFO
bool print_fallback = false;
#endif
size_t allocated_size = 0;
cudaError_t error = cudaSuccess;
void *new_ptr = NULL;
if ( pool != NULL /*&& size < pool->get_max_block_size()*/ && use_pool )
{
new_ptr = pool->allocate(size, allocated_size);
}
if ( new_ptr != NULL )
{
*ptr = new_ptr;
}
else
{
#ifdef AMGX_PRINT_MEMORY_INFO
print_fallback = true;
#endif
// We allocate an extra fraction here.
allocated_size = manager.scale(size);
// We hack the size to make it a multiple of a page size.
allocated_size = PAGE_SIZE * ((allocated_size + PAGE_SIZE - 1) / PAGE_SIZE);
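// Worked example of the rounding above (assuming a PAGE_SIZE of 4096 B): a scaled
// request of 5000 B becomes 4096 * ((5000 + 4095) / 4096) = 8192 B.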
error = ::cudaMalloc(ptr, allocated_size);
// Very last attempt. Try without over allocation.
if ( *ptr == NULL )
{
allocated_size = size;
error = ::cudaMalloc(ptr, allocated_size);
}
manager.m_mutex.lock();
manager.m_allocated_blocks[*ptr] = allocated_size;
manager.m_mutex.unlock();
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
if (rank == 0)
#endif
{
std::cerr << "INFO: Registered [block " << std::setw(18) << *ptr << " size: " << allocated_size << "]" << std::endl;
}
#endif
}
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
if (rank == 0)
#endif
{
if ( print_fallback )
{
std::cerr << "cudaMalloc ";
}
else
{
std::cerr << "pool::allocate";
}
std::cerr << ";" << std::setw(18) << *ptr
<< ";" << std::setw(12) << size
<< ";" << std::setw(12) << allocated_size
<< ";" << std::setw(12) << pool->get_used_mem()
<< ";" << std::setw(12) << pool->get_free_mem();
size_t gpu_free_mem, gpu_total_mem;
cudaMemGetInfo(&gpu_free_mem, &gpu_total_mem);
std::cerr << ";" << std::setw(12) << gpu_free_mem
<< ";" << std::setw(12) << gpu_total_mem;
std::cerr << std::endl;
}
#endif
return error;
}
cudaError_t cudaFreeAsync(void *ptr)
{
AMGX_CPU_PROFILER("cudaFreeAsync");
#ifdef AMGX_PRINT_MALLOC_CALL_STACK
#ifdef MULTIGPU
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0)
#endif
{
std::cerr << "----" << std::endl;
std::cerr << "cudaFreeAsync call stack:" << std::endl;
printStackTrace(std::cerr);
}
#endif
// We accept NULL pointers and we do nothing.
if ( ptr == NULL )
{
return cudaSuccess;
}
MemoryManager &manager = MemoryManager::get_instance();
_thread_id thread_id = getCurrentThreadId();
#ifdef AMGX_PRINT_MEMORY_INFO
bool print_async = false, print_fallback = false;
#endif
DeviceMemoryPool *pool = manager.m_main_device_pool;
size_t freed_size = 0;
cudaError_t status = cudaSuccess;
MemoryManager::DevicePoolMap::iterator it_pool = manager.m_thread_device_pools.find(thread_id);
if ( it_pool != manager.m_thread_device_pools.end() )
{
pool = manager.m_thread_device_pools[thread_id];
}
if ( pool != NULL && pool->is_allocated(ptr) )
{
pool->free(ptr, freed_size);
}
else if ( pool != NULL && manager.m_use_async_free )
{
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
if (rank == 0)
#endif
{
print_async = true;
std::cerr << "INFO: Async free [ptr " << std::setw(18) << ptr << "]" << std::endl;
}
#endif
MemoryManager::MemoryBlockMap::iterator ptr_it = manager.m_allocated_blocks.find(ptr);
if ( ptr_it == manager.m_allocated_blocks.end() )
{
FatalError("INTERNAL ERROR: Invalid call to cudaFreeAsync", AMGX_ERR_UNKNOWN);
}
pool->add_memory(ptr, ptr_it->second);
manager.m_mutex.lock();
manager.m_allocated_blocks.erase(ptr_it);
manager.m_mutex.unlock();
}
else
{
#ifdef AMGX_PRINT_MEMORY_INFO
print_fallback = true;
#endif
status = ::cudaFree(ptr);
}
#ifdef AMGX_PRINT_MEMORY_INFO
#ifdef MULTIGPU
if (rank == 0)
#endif
{
if ( print_fallback )
{
std::cerr << "cudaFree ";
}
else if ( print_async )
{
std::cerr << "pool::async ";
}
else
{
std::cerr << "pool::free ";
}
std::cerr << ";" << std::setw(18) << ptr
<< ";" << std::setw(12) << freed_size
<< ";" << std::setw(12) << pool->get_used_mem()
<< ";" << std::setw(12) << pool->get_free_mem();
size_t gpu_free_mem, gpu_total_mem;
cudaMemGetInfo(&gpu_free_mem, &gpu_total_mem);
std::cerr << ";" << std::setw(12) << gpu_free_mem
<< ";" << std::setw(12) << gpu_total_mem;
std::cerr << std::endl;
}
#endif
return status;
}
void cudaFreeWait()
{
}
// Join pinned and device pools
void joinPinnedPools()
{
MemoryManager &manager = MemoryManager::get_instance();
typedef MemoryManager::PinnedPoolMap::iterator Iterator;
Iterator it = manager.m_thread_pinned_pools.begin();
Iterator end = manager.m_thread_pinned_pools.end();
for ( ; it != end ; ++it )
{
manager.sync_pinned_pool(it->second);
}
}
void joinDevicePools()
{
MemoryManager &manager = MemoryManager::get_instance();
typedef MemoryManager::DevicePoolMap::iterator Iterator;
Iterator it = manager.m_thread_device_pools.begin();
Iterator end = manager.m_thread_device_pools.end();
for ( ; it != end ; ++it )
{
manager.sync_device_pool(it->second);
}
}
void printInfo()
{
//
}
void expandDeviceMemoryPool(size_t size, size_t max_block_size)
{
MemoryManager &manager = MemoryManager::get_instance();
if (manager.m_main_device_pool)
{
manager.m_main_device_pool->expandPool(size, max_block_size);
}
}
} // namespace memory
} // namespace amgx
|
d9d338d4004ca9dc6203eb7ce22577be4d7fc7f1.hip | // !!! This is a file automatically generated by hipify!!!
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* mandelbrot.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: adoussau <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2015/02/03 16:08:27 by adoussau #+# #+# */
/* Updated: 2015/02/03 16:08:30 by adoussau ### ########.fr */
/* */
/* ************************************************************************** */
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
# define WIN_SZ_X 1024
# define WIN_SZ_Y 1024
# define TYPE double
__global__ void glynn(int *d_tab, double offx, double offy, double zoom, int ite_max, int winszx, int winszy)
{
TYPE temp = 0; /* was float, which silently truncated the double intermediate */
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int index = row * winszx + col;
if(col >= winszx || row >= winszy)
return;
TYPE z_r = (((double)col + offx) / zoom) + -2.1;
TYPE z_i = (((double)row + offy) / zoom) + -1.2;
unsigned int i = 0;
const TYPE c_r = -0.2; /* c_r/c_i were undefined in the original; assumed Glynn-set constants */
const TYPE c_i = 0.0;  /* assumed */
while(sqrt(z_r * z_r + z_i * z_i) < 1 && i < ite_max)
{
temp = z_r;
z_r = sqrt((z_r * z_r - z_i * z_i) * (z_r * z_r - z_i * z_i)) + c_r;
z_i = sqrt((2 * z_i * temp)*(2 * z_i * temp)) + c_i;
i++;
}
d_tab[index] = i;
}
extern "C" void call_glynn(int *tab, double offx, double offy, double zoom, int ite_max, int winszx, int winszy)
{
int *d_tab = NULL;
int size = 0;
dim3 block_size(16, 16);
dim3 grid_size(WIN_SZ_X / block_size.x, WIN_SZ_Y / block_size.y);
size = WIN_SZ_Y * WIN_SZ_X * sizeof(int);
hipMalloc((void **)&d_tab, size);
hipLaunchKernelGGL(( glynn), dim3(grid_size),dim3(block_size), 0, 0, d_tab, offx, offy, zoom, ite_max, winszx, winszy);
hipMemcpy(tab, d_tab, size, hipMemcpyDeviceToHost);
hipFree(d_tab);
}
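/*
** Usage sketch (hypothetical host code, not part of the original file): the
** offsets, zoom and iteration count below are illustrative values only.
**
** int *tab = (int *)malloc(WIN_SZ_X * WIN_SZ_Y * sizeof(int));
** call_glynn(tab, 0.0, 0.0, 400.0, 256, WIN_SZ_X, WIN_SZ_Y);
** // ... map each iteration count in tab to a pixel color ...
** free(tab);
*/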
| d9d338d4004ca9dc6203eb7ce22577be4d7fc7f1.cu | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* mandelbrot.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: adoussau <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2015/02/03 16:08:27 by adoussau #+# #+# */
/* Updated: 2015/02/03 16:08:30 by adoussau ### ########.fr */
/* */
/* ************************************************************************** */
#include <stdlib.h>
#include <cuda.h>
#include <stdio.h>
# define WIN_SZ_X 1024
# define WIN_SZ_Y 1024
# define TYPE double
__global__ void glynn(int *d_tab, double offx, double offy, double zoom, int ite_max, int winszx, int winszy)
{
TYPE temp = 0; /* was float, which silently truncated the double intermediate */
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int index = row * winszx + col;
if(col >= winszx || row >= winszy)
return;
TYPE z_r = (((double)col + offx) / zoom) + -2.1;
TYPE z_i = (((double)row + offy) / zoom) + -1.2;
unsigned int i = 0;
const TYPE c_r = -0.2; /* c_r/c_i were undefined in the original; assumed Glynn-set constants */
const TYPE c_i = 0.0;  /* assumed */
while(sqrt(z_r * z_r + z_i * z_i) < 1 && i < ite_max)
{
temp = z_r;
z_r = sqrt((z_r * z_r - z_i * z_i) * (z_r * z_r - z_i * z_i)) + c_r;
z_i = sqrt((2 * z_i * temp)*(2 * z_i * temp)) + c_i;
i++;
}
d_tab[index] = i;
}
extern "C" void call_glynn(int *tab, double offx, double offy, double zoom, int ite_max, int winszx, int winszy)
{
int *d_tab = NULL;
int size = 0;
dim3 block_size(16, 16);
dim3 grid_size(WIN_SZ_X / block_size.x, WIN_SZ_Y / block_size.y);
size = WIN_SZ_Y * WIN_SZ_X * sizeof(int);
cudaMalloc((void **)&d_tab, size);
glynn<<<grid_size,block_size>>>(d_tab, offx, offy, zoom, ite_max, winszx, winszy);
cudaMemcpy(tab, d_tab, size, cudaMemcpyDeviceToHost);
cudaFree(d_tab);
}
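/*
** Usage sketch (hypothetical host code, not part of the original file): the
** offsets, zoom and iteration count below are illustrative values only.
**
** int *tab = (int *)malloc(WIN_SZ_X * WIN_SZ_Y * sizeof(int));
** call_glynn(tab, 0.0, 0.0, 400.0, 256, WIN_SZ_X, WIN_SZ_Y);
** // ... map each iteration count in tab to a pixel color ...
** free(tab);
*/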
|
a36a5f914687730bdaffbcf43592531bb7852943.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlarfgx-v2.cu normal z -> c, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#include "commonblas_c.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_c
//==============================================================================
__global__
void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ float xnorm;
magmaFloatComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#if (defined(PRECISION_s) || defined(PRECISION_d))
float alpha = *dx0;
float alphai = MAGMA_C_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 1 )
#else
magmaFloatComplex alpha = *dx0;
float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
//*dx0 = 1.; // cannot be done here: race condition, since all thread blocks need to read it for alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
//*dx0 = MAGMA_C_MAKE( 1., 0.); // cannot be done here: race condition, since all thread blocks need to read it for alpha
*dA = MAGMA_C_MAKE(beta, 0.);
}
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_C_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
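/*
    Worked detail (standard Householder algebra, added for clarity): with
    x = [dx0; dx] and beta = -copysign(||x||, Re(dx0)), the kernel forms
        tau = (beta - dx0) / beta, scale = 1 / (dx0 - beta), v = [1; dx * scale],
    so that H x = (I - tau v v^H) x = [beta; 0]. For example, real x = [3; 4]
    gives ||x|| = 5, beta = -5, tau = 1.6, scale = 0.125, v = [1; 0.5], and
    (I - 1.6 v v^T) x = [-5; 0] as expected.
*/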
extern "C" void
magma_clarfgx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_clarfgx_gpu_kernel), dim3(blocks), dim3(threads), 0, magma_stream , n, dx0, dx, dtau, dxnorm, dA, iter);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfgtx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter,
magmaFloatComplex_ptr V, magma_int_t ldv,
magmaFloatComplex_ptr T, magma_int_t ldt,
magmaFloatComplex_ptr dwork)
{
/* Generate the elementary reflector H(iter) */
magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter);
if (iter==0) {
magmaFloatComplex tt = MAGMA_C_ONE;
magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, T+iter+iter*ldt, 1);
magma_csetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the iter-th column of T */
hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(iter), dim3(BLOCK_SIZE), 0, magma_stream , n, V, ldv, dx0, dwork, dtau );
hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(iter), dim3(iter), 0, magma_stream , T, ldt, dwork, T+iter*ldt, dtau );
}
}
//==============================================================================
| a36a5f914687730bdaffbcf43592531bb7852943.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlarfgx-v2.cu normal z -> c, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#include "commonblas_c.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_c
//==============================================================================
__global__
void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ float xnorm;
magmaFloatComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#if (defined(PRECISION_s) || defined(PRECISION_d))
float alpha = *dx0;
float alphai = MAGMA_C_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 1 )
#else
magmaFloatComplex alpha = *dx0;
float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
//*dx0 = 1.; // cannot be done here: race condition, since all thread blocks need to read it for alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
//*dx0 = MAGMA_C_MAKE( 1., 0.); // cannot be done here: race condition, since all thread blocks need to read it for alpha
*dA = MAGMA_C_MAKE(beta, 0.);
}
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_C_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
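/*
    Worked detail (standard Householder algebra, added for clarity): with
    x = [dx0; dx] and beta = -copysign(||x||, Re(dx0)), the kernel forms
        tau = (beta - dx0) / beta, scale = 1 / (dx0 - beta), v = [1; dx * scale],
    so that H x = (I - tau v v^H) x = [beta; 0]. For example, real x = [3; 4]
    gives ||x|| = 5, beta = -5, tau = 1.6, scale = 0.125, v = [1; 0.5], and
    (I - 1.6 v v^T) x = [-5; 0] as expected.
*/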
extern "C" void
magma_clarfgx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
magma_clarfgx_gpu_kernel<<< blocks, threads, 0, magma_stream >>>( n, dx0, dx, dtau, dxnorm, dA, iter);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfgtx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter,
magmaFloatComplex_ptr V, magma_int_t ldv,
magmaFloatComplex_ptr T, magma_int_t ldt,
magmaFloatComplex_ptr dwork)
{
/* Generate the elementary reflector H(iter) */
magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter);
if (iter==0) {
magmaFloatComplex tt = MAGMA_C_ONE;
magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, T+iter+iter*ldt, 1);
magma_csetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the iter-th column of T */
magma_cgemv_kernel3<<< iter, BLOCK_SIZE, 0, magma_stream >>>( n, V, ldv, dx0, dwork, dtau );
magma_ctrmv_kernel2<<< iter, iter, 0, magma_stream >>>( T, ldt, dwork, T+iter*ldt, dtau );
}
}
//==============================================================================
|
adf1937924c312b102518aef47b00f21233c206d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include <cudf/cudf.h>
#include <utilities/error_utils.hpp>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, cudf::size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, cudf::valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, cudf::size_type *offsets, cudf::size_type numRows);
__global__ void determineValidRecCount(cudf::valid_type *validArray, cudf::size_type numRows, cudf::size_type numCol, cudf::size_type * offset);
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype); // declaration fixed to match the definition below
__device__ int whichBitmapCSR(int record) { return (record/8); }
__device__ int whichBitCSR(int bit) { return (bit % 8); }
__device__ int checkBitCSR(cudf::valid_type data, int bit) {
cudf::valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
return (data & bitMask[bit]);
}
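// Worked example of the helpers above: for record 10, whichBitmapCSR(10) = 1
// (second byte) and whichBitCSR(10) = 2, so checkBitCSR tests validArray[1] & 0x04.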
//
//------------------------------------------------------------
//
/*
* Convert a Dense GDF into a CSR GDF
*
* Restrictions: All columns need to be of the same length
*/
/**
* @brief convert a GDF into a CSR
*
* Take a matrix in GDF format and convert it into a CSR. The column major matrix needs to have every column defined.
* Passing in a COO dataset will be treated as a two-column matrix
*
* @param[in] gdfData the ordered list of columns
* @param[in] numCol the number of columns in the gdfData array
*
* @param[out] csrReturn a pointer to the returned data structure
*
* @return gdf_error code
*/
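/*
 * Illustrative example (hypothetical 2x3 input, one null, row-major view):
 * row 0 = [1, 2, 3], row 1 = [4, null, 6] yields the CSR arrays
 * A = [1, 2, 3, 4, 6], JA = [0, 1, 2, 0, 2], IA = [0, 3, 5], nnz = 5.
 */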
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
int64_t numNull = 0;
int64_t nnz = 0;
cudf::size_type numRows = gdfData[0]->size;
gdf_dtype dType = gdf_dtype::GDF_invalid; // the data type to make the CSR element array (A)
/**
* Currently the gdf_dtype enum is arranged based on data size, as long as it stays that way the enum values can be
* exploited by just picking the largest enum value
*
* While looping, also get the number of null values (this will work one day)
*/
for ( int x =0; x < numCol; x++) {
if( gdfData[x]->dtype > dType)
dType = gdfData[x]->dtype;
numNull += gdfData[x]->null_count;
}
if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING )
return gdf_error::GDF_UNSUPPORTED_DTYPE;
// the number of valid elements is simply the max number of possible elements (rows * columns) minus the number of nulls
// the current problem is that algorithms are not setting null_count;
// cudf::size_type is 32bits (int) but the total size could be larger than an int, so use a long
nnz = (numRows * numCol) - numNull;
// Allocate space for the offset - this will eventually be IA - dtype is long since the sum of all column elements could be larger than int32
cudf::size_type * offsets;
RMM_TRY(RMM_ALLOC((void**)&offsets, (numRows + 2) * sizeof(int64_t), 0)); // TODO: non-default stream?
CUDA_TRY(hipMemset(offsets, 0, ( sizeof(int64_t) * (numRows + 2) ) ));
// do a pass over each columns, and have each column updates the row count
//-- threads and blocks
int threads = 1024;
int blocks = (numRows + threads - 1) / threads;
for ( int x = 0; x < numCol; x++ ) {
hipLaunchKernelGGL(( determineValidRecCount), dim3(blocks), dim3(threads), 0, 0, gdfData[x]->valid, numRows, numCol, offsets);
}
//--------------------------------------------------------------------------------------
// Now do an exclusive scan to compute the offsets for where to write data
thrust::exclusive_scan(rmm::exec_policy()->on(0), offsets, (offsets + numRows + 1), offsets);
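// e.g., per-row valid counts [2, 1, 3] (plus the trailing zero slot) scan to
// offsets [0, 2, 3, 6], so offsets[numRows] ends up holding the total nnz.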
//--------------------------------------------------------------------------------------
// get the number of elements - NNZ, this is the last item in the array
CUDA_TRY( hipMemcpy((void *)&nnz, (void *)&offsets[numRows], sizeof(int64_t), hipMemcpyDeviceToHost) );
if ( nnz == 0)
return GDF_CUDA_ERROR;
//--------------------------------------------------------------------------------------
// now start creating output data
cudf::size_type* IA;
RMM_TRY(RMM_ALLOC((void**)&IA, (numRows + 2) * sizeof(cudf::size_type), 0));
CUDA_TRY(hipMemcpy(IA, offsets, ( sizeof(cudf::size_type) * (numRows + 2) ), hipMemcpyDeviceToDevice) );
int64_t * JA;
RMM_TRY( RMM_ALLOC((void**)&JA, (sizeof(int64_t) * nnz), 0));
//----------------------------------------------------------------------------------
// Now just missing A and the moving of data
csrReturn->dtype = dType;
csrReturn->rows = numRows;
csrReturn->cols = numCol;
csrReturn->dtype = dType;
csrReturn->JA = JA;
csrReturn->IA = IA;
csrReturn->nnz = nnz;
// Start processing based on data type
gdf_error status = GDF_SUCCESS;
switch(dType) {
case gdf_dtype::GDF_INT8:
status = runConverter<int8_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT16:
status = runConverter<int16_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT32:
status = runConverter<int32_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT64:
status = runConverter<int64_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT32:
status = runConverter<float>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT64:
status = runConverter<double>(gdfData, csrReturn, offsets);
break;
default:
RMM_TRY(RMM_FREE(IA, 0));
RMM_TRY(RMM_FREE(JA, 0));
RMM_TRY(RMM_FREE(offsets, 0));
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY(RMM_FREE(offsets, 0));
return status;
}
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, cudf::size_type * offsets) {
cudf::size_type numCols = csrReturn->cols;
cudf::size_type numRows = csrReturn->rows;
//-- threads and blocks
int threads = 1024;
if ( numRows < 100 ) {
threads = 64;
} else if (numRows < 256) {
threads = 128;
} else if ( numRows < 512) {
threads = 256;
} else if ( numRows < 1024) {
threads = 512;
}
int blocks = (numRows + threads - 1) / threads;
T * A;
RMM_TRY(RMM_ALLOC((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
CUDA_TRY(hipMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
// Now start moving the data and creating the CSR
for ( cudf::size_type colId = 0; colId < numCols; colId++ ) {
gdf_column *gdf = gdfData[colId];
hipLaunchKernelGGL(( cudaCreateCSR<T>), dim3(blocks), dim3(threads), 0, 0, gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
CUDA_CHECK_LAST();
}
csrReturn->A = A;
return gdf_error::GDF_SUCCESS;
}
/*
* Move data over into CSR and possibly convert the format
*/
template<typename T>
__global__ void cudaCreateCSR(
void *data, cudf::valid_type *valid, gdf_dtype dtype, int colId,
T *A, int64_t *JA, cudf::size_type *offsets, cudf::size_type numRows)
{
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // which bitmap
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
cudf::valid_type bitmap = valid[bitmapIdx];
if ( checkBitCSR( bitmap, bitIdx) ) {
cudf::size_type offsetIdx = offsets[tid]; // where should this thread start writing data
A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
JA[offsetIdx] = colId;
++offsets[tid];
}
}
/*
* Compute the number of valid entries per row - a row spans multiple gdf_columns -
* There is one thread running per row, so just compute the sum for this row.
*
* The number of elements in a valid array is actually ceil(numRows / 8) since it is a bitmap. The total number of bits checked is equal to numRows.
*
*/
__global__ void determineValidRecCount(cudf::valid_type *valid, cudf::size_type numRows, cudf::size_type numCol, cudf::size_type * offset) {
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // want the floor of the divide
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
cudf::valid_type bitmap = valid[bitmapIdx];
if (checkBitCSR( bitmap, bitIdx) )
++offset[tid];
}
/**
* Convert the data element into a common format
*/
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
T answer;
switch(dtype) {
case gdf_dtype::GDF_INT8: {
int8_t *a = (int8_t *)data;
answer = (T)(a[tid]);
break;
}
case gdf_dtype::GDF_INT16: {
int16_t *b = (int16_t *)data;
answer = (T)(b[tid]);
break;
}
case gdf_dtype::GDF_INT32: {
int32_t *c = (int32_t *)data;
answer = (T)(c[tid]);
break;
}
case gdf_dtype::GDF_INT64: {
int64_t *d = (int64_t *)data;
answer = (T)(d[tid]);
break;
}
case gdf_dtype::GDF_FLOAT32: {
float *e = (float *)data;
answer = (T)(e[tid]);
break;
}
case gdf_dtype::GDF_FLOAT64: {
double *f = (double *)data;
answer = (T)(f[tid]);
break;
}
default: {
answer = (T)0; // unreachable for the dtypes gdf_to_csr accepts; avoids an uninitialized read
break;
}
}
return answer;
}
| adf1937924c312b102518aef47b00f21233c206d.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include <cudf/cudf.h>
#include <utilities/error_utils.hpp>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, cudf::size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, cudf::valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, cudf::size_type *offsets, cudf::size_type numRows);
__global__ void determineValidRecCount(cudf::valid_type *validArray, cudf::size_type numRows, cudf::size_type numCol, cudf::size_type * offset);
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype); // declaration fixed to match the definition below
__device__ int whichBitmapCSR(int record) { return (record/8); }
__device__ int whichBitCSR(int bit) { return (bit % 8); }
__device__ int checkBitCSR(cudf::valid_type data, int bit) {
cudf::valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
return (data & bitMask[bit]);
}
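// Worked example of the helpers above: for record 10, whichBitmapCSR(10) = 1
// (second byte) and whichBitCSR(10) = 2, so checkBitCSR tests validArray[1] & 0x04.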
//
//------------------------------------------------------------
//
/*
* Convert a Dense GDF into a CSR GDF
*
* Restrictions: All columns need to be of the same length
*/
/**
* @brief convert a GDF into a CSR
*
* Take a matrix in GDF format and convert it into a CSR. The column major matrix needs to have every column defined.
* Passing in a COO dataset will be treated as a two-column matrix
*
* @param[in] gdfData the ordered list of columns
* @param[in] numCol the number of columns in the gdfData array
*
* @param[out] csrReturn a pointer to the returned data structure
*
* @return gdf_error code
*/
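/*
 * Illustrative example (hypothetical 2x3 input, one null, row-major view):
 * row 0 = [1, 2, 3], row 1 = [4, null, 6] yields the CSR arrays
 * A = [1, 2, 3, 4, 6], JA = [0, 1, 2, 0, 2], IA = [0, 3, 5], nnz = 5.
 */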
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
int64_t numNull = 0;
int64_t nnz = 0;
cudf::size_type numRows = gdfData[0]->size;
gdf_dtype dType = gdf_dtype::GDF_invalid; // the data type to make the CSR element array (A)
/**
* Currently the gdf_dtype enum is arranged based on data size, as long as it stays that way the enum values can be
* exploited by just picking the largest enum value
*
* While looping, also get the number of null values (this will work one day)
*/
for ( int x =0; x < numCol; x++) {
if( gdfData[x]->dtype > dType)
dType = gdfData[x]->dtype;
numNull += gdfData[x]->null_count;
}
if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING )
return gdf_error::GDF_UNSUPPORTED_DTYPE;
// the number of valid elements is simply the max number of possible elements (rows * columns) minus the number of nulls
// the current problem is that algorithms are not setting null_count;
// cudf::size_type is 32bits (int) but the total size could be larger than an int, so use a long
nnz = (numRows * numCol) - numNull;
// Allocate space for the offset - this will eventually be IA - dtype is long since the sum of all column elements could be larger than int32
cudf::size_type * offsets;
RMM_TRY(RMM_ALLOC((void**)&offsets, (numRows + 2) * sizeof(int64_t), 0)); // TODO: non-default stream?
CUDA_TRY(cudaMemset(offsets, 0, ( sizeof(int64_t) * (numRows + 2) ) ));
// do a pass over each columns, and have each column updates the row count
//-- threads and blocks
int threads = 1024;
int blocks = (numRows + threads - 1) / threads;
for ( int x = 0; x < numCol; x++ ) {
determineValidRecCount<<<blocks, threads>>>(gdfData[x]->valid, numRows, numCol, offsets);
}
//--------------------------------------------------------------------------------------
// Now do an exclusive scan to compute the offsets for where to write data
thrust::exclusive_scan(rmm::exec_policy()->on(0), offsets, (offsets + numRows + 1), offsets);
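// e.g., per-row valid counts [2, 1, 3] (plus the trailing zero slot) scan to
// offsets [0, 2, 3, 6], so offsets[numRows] ends up holding the total nnz.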
//--------------------------------------------------------------------------------------
// get the number of elements - NNZ, this is the last item in the array
CUDA_TRY( cudaMemcpy((void *)&nnz, (void *)&offsets[numRows], sizeof(int64_t), cudaMemcpyDeviceToHost) );
if ( nnz == 0)
return GDF_CUDA_ERROR;
//--------------------------------------------------------------------------------------
// now start creating output data
cudf::size_type* IA;
RMM_TRY(RMM_ALLOC((void**)&IA, (numRows + 2) * sizeof(cudf::size_type), 0));
CUDA_TRY(cudaMemcpy(IA, offsets, ( sizeof(cudf::size_type) * (numRows + 2) ), cudaMemcpyDeviceToDevice) );
int64_t * JA;
RMM_TRY( RMM_ALLOC((void**)&JA, (sizeof(int64_t) * nnz), 0));
//----------------------------------------------------------------------------------
// Now just missing A and the moving of data
csrReturn->dtype = dType;
csrReturn->rows = numRows;
csrReturn->cols = numCol;
csrReturn->dtype = dType;
csrReturn->JA = JA;
csrReturn->IA = IA;
csrReturn->nnz = nnz;
// Start processing based on data type
gdf_error status = GDF_SUCCESS;
switch(dType) {
case gdf_dtype::GDF_INT8:
status = runConverter<int8_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT16:
status = runConverter<int16_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT32:
status = runConverter<int32_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT64:
status = runConverter<int64_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT32:
status = runConverter<float>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT64:
status = runConverter<double>(gdfData, csrReturn, offsets);
break;
default:
RMM_TRY(RMM_FREE(IA, 0));
RMM_TRY(RMM_FREE(JA, 0));
RMM_TRY(RMM_FREE(offsets, 0));
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY(RMM_FREE(offsets, 0));
return status;
}
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, cudf::size_type * offsets) {
cudf::size_type numCols = csrReturn->cols;
cudf::size_type numRows = csrReturn->rows;
//-- threads and blocks
int threads = 1024;
if ( numRows < 100 ) {
threads = 64;
} else if (numRows < 256) {
threads = 128;
} else if ( numRows < 512) {
threads = 256;
} else if ( numRows < 1024) {
threads = 512;
}
int blocks = (numRows + threads - 1) / threads;
T * A;
RMM_TRY(RMM_ALLOC((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
CUDA_TRY(cudaMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
// Now start moving the data and creating the CSR
for ( cudf::size_type colId = 0; colId < numCols; colId++ ) {
gdf_column *gdf = gdfData[colId];
cudaCreateCSR<T><<<blocks, threads>>>(gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
CUDA_CHECK_LAST();
}
csrReturn->A = A;
return gdf_error::GDF_SUCCESS;
}
/*
* Move data over into CSR and possibly convert the format
*/
template<typename T>
__global__ void cudaCreateCSR(
void *data, cudf::valid_type *valid, gdf_dtype dtype, int colId,
T *A, int64_t *JA, cudf::size_type *offsets, cudf::size_type numRows)
{
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // which bitmap
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
cudf::valid_type bitmap = valid[bitmapIdx];
if ( checkBitCSR( bitmap, bitIdx) ) {
cudf::size_type offsetIdx = offsets[tid]; // where should this thread start writing data
A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
JA[offsetIdx] = colId;
++offsets[tid];
}
}
/*
* Compute the number of valid entries per row - a row spans multiple gdf_columns -
* There is one thread running per row, so just compute the sum for this row.
*
* The number of elements in a valid array is actually ceil(numRows / 8) since it is a bitmap. The total number of bits checked is equal to numRows.
*
*/
__global__ void determineValidRecCount(cudf::valid_type *valid, cudf::size_type numRows, cudf::size_type numCol, cudf::size_type * offset) {
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // want the floor of the divide
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
cudf::valid_type bitmap = valid[bitmapIdx];
if (checkBitCSR( bitmap, bitIdx) )
++offset[tid];
}
/**
* Convert the data element into a common format
*/
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
T answer;
switch(dtype) {
case gdf_dtype::GDF_INT8: {
int8_t *a = (int8_t *)data;
answer = (T)(a[tid]);
break;
}
case gdf_dtype::GDF_INT16: {
int16_t *b = (int16_t *)data;
answer = (T)(b[tid]);
break;
}
case gdf_dtype::GDF_INT32: {
int32_t *c = (int32_t *)data;
answer = (T)(c[tid]);
break;
}
case gdf_dtype::GDF_INT64: {
int64_t *d = (int64_t *)data;
answer = (T)(d[tid]);
break;
}
case gdf_dtype::GDF_FLOAT32: {
float *e = (float *)data;
answer = (T)(e[tid]);
break;
}
case gdf_dtype::GDF_FLOAT64: {
double *f = (double *)data;
answer = (T)(f[tid]);
break;
}
default: {
answer = (T)0; // unreachable for the dtypes gdf_to_csr accepts; avoids an uninitialized read
break;
}
}
return answer;
}
|
3ea60728fbf677ced1cd34382c19df025c2840dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <iostream>
using namespace std;
using std::cout;
using std::endl;
#include "runners.h"
#include "kernels-3d.h"
static constexpr long3 lens = {
((1 << 8) + 2),
((1 << 8) + 4),
((1 << 8) + 8)};
static constexpr long lens_flat = lens.x * lens.y * lens.z;
static constexpr long n_runs = 100;
static Globs
<long3,int3
,Kernel3dVirtual
,Kernel3dPhysMultiDim
,Kernel3dPhysSingleDim
> G(lens, lens_flat, n_runs);
template<
const int amin_x, const int amin_y, const int amin_z,
const int amax_x, const int amax_y, const int amax_z>
__host__
void stencil_3d_cpu(
const T* start,
T* out)
{
constexpr int3 range = {
amax_x - amin_x + 1,
amax_y - amin_y + 1,
amax_z - amin_z + 1};
constexpr int total_range = range.x * range.y * range.z;
const int max_x_idx = lens.x - 1;
const int max_y_idx = lens.y - 1;
const int max_z_idx = lens.z - 1;
for (int gidz = 0; gidz < lens.z; ++gidz){
for (int gidy = 0; gidy < lens.y; ++gidy){
for (int gidx = 0; gidx < lens.x; ++gidx){
T arr[total_range];
for(int i=0; i < range.z; i++){
for(int j=0; j < range.y; j++){
for(int k=0; k < range.x; k++){
const long z = bound<(amin_z<0),long>(gidz + (i + amin_z), max_z_idx);
const long y = bound<(amin_y<0),long>(gidy + (j + amin_y), max_y_idx);
const long x = bound<(amin_x<0),long>(gidx + (k + amin_x), max_x_idx);
const long index = (z*lens.y + y)*lens.x + x;
const int flat_idx = (i*range.y + j)*range.x + k;
arr[flat_idx] = start[index];
}
}
}
T lambda_res = stencil_fun_3d<amin_x, amin_y, amin_z, amax_x, amax_y, amax_z>(arr);
out[(gidz*lens.y + gidy)*lens.x + gidx] = lambda_res;
}
}
}
}
template<
const int amin_x, const int amin_y, const int amin_z,
const int amax_x, const int amax_y, const int amax_z>
__host__
void run_cpu_3d(T* cpu_out)
{
T* cpu_in = (T*)malloc(lens_flat*sizeof(T));
srand(1);
for (int i = 0; i < lens_flat; ++i)
{
cpu_in[i] = (T)rand();
}
struct timeval t_startpar, t_endpar, t_diffpar;
gettimeofday(&t_startpar, NULL);
{
stencil_3d_cpu<amin_x, amin_y, amin_z, amax_x, amax_y, amax_z>(cpu_in,cpu_out);
}
gettimeofday(&t_endpar, NULL);
timeval_subtract(&t_diffpar, &t_endpar, &t_startpar);
const unsigned long elapsed = (t_diffpar.tv_sec*1e6+t_diffpar.tv_usec) / 1000;
const unsigned long seconds = elapsed / 1000;
const unsigned long microseconds = elapsed % 1000;
printf("cpu c 3d for 1 run : %lu.%03lu seconds\n", seconds, microseconds);
free(cpu_in);
}
template<
const int amin_z, const int amax_z,
const int amin_y, const int amax_y,
const int amin_x, const int amax_x,
const int group_size_x, const int group_size_y, const int group_size_z,
const int strip_pow_x, const int strip_pow_y, const int strip_pow_z>
__host__
void doTest_3D(const int physBlocks)
{
static_assert(amin_z <= amax_z, "invalid setup");
static_assert(amin_y <= amax_y, "invalid setup");
static_assert(amin_x <= amax_x, "invalid setup");
const int z_range = (amax_z + 1) - amin_z;
const int y_range = (amax_y + 1) - amin_y;
const int x_range = (amax_x + 1) - amin_x;
#ifdef Jacobi3D
const int ixs_len = z_range + y_range + x_range - 2;
#else
const int ixs_len = z_range * y_range * x_range;
#endif
cout << "ixs[" << ixs_len << "] = (zr,yr,xr) = (" << amin_z << "..." << amax_z << ", " << amin_y << "..." << amax_y << ", " << amin_x << "..." << amax_x << ")\n";
constexpr long len = lens_flat;
T* cpu_out = (T*)malloc(len*sizeof(T));
run_cpu_3d<amin_x, amin_y, amin_z, amax_x, amax_y, amax_z>(cpu_out);
constexpr int blockDim_flat = group_size_x * group_size_y * group_size_z;
constexpr int3 virtual_grid = {
divUp((int)lens.x, group_size_x),
divUp((int)lens.y, group_size_y),
divUp((int)lens.z, group_size_z)};
constexpr dim3 block_3d(group_size_x,group_size_y,group_size_z);
constexpr dim3 block_3d_flat(group_size_x*group_size_y*group_size_z,1,1);
constexpr dim3 grid_3d(virtual_grid.x, virtual_grid.y, virtual_grid.z);
constexpr int virtual_grid_flat = virtual_grid.x * virtual_grid.y * virtual_grid.z;
constexpr int3 virtual_grid_spans = { 1, virtual_grid.x, virtual_grid.x * virtual_grid.y };
constexpr int sh_size_x = group_size_x + amax_x - amin_x;
constexpr int sh_size_y = group_size_y + amax_y - amin_y;
constexpr int sh_size_z = group_size_z + amax_z - amin_z;
constexpr int sh_size_flat = sh_size_x * sh_size_y * sh_size_z;
constexpr int sh_mem_size_flat = sh_size_flat * sizeof(T);
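// Worked size check (hypothetical 32x8x4 block, radius-1 stencil on every axis):
// sh sizes are 34x10x6 = 2040 elements, i.e. 8160 B of shared memory for f32.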
cout << "Blockdim z,y,x = " << group_size_z << ", " << group_size_y << ", " << group_size_x << endl;
//printf("virtual number of blocks = %d\n", virtual_grid_flat);
{
/*{
cout << "## Benchmark 3d global read - inlined ixs - multiDim grid ##";
Kernel3dPhysMultiDim kfun = global_reads_3d_inlined
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
for(int i=0;i<4;i++){
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, 1, false); // warmup as it is first kernel
}
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, 1);
}
{
cout << "## Benchmark 3d global read - inlined ixs - singleDim grid - grid span ##";
Kernel3dPhysSingleDim kfun = global_reads_3d_inlined_singleDim_gridSpan
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_singleDim(kfun, cpu_out, virtual_grid_flat, blockDim_flat, virtual_grid_spans, 1);
}
{
constexpr long lens_grid = divUp(lens_flat, long(blockDim_flat));
constexpr int3 lens_spans = { 1, int(lens.x), int(lens.x*lens.y) };
cout << "## Benchmark 3d global read - inlined ixs - singleDim grid - lens span ##";
Kernel3dPhysSingleDim kfun = global_reads_3d_inlined_singleDim_lensSpan
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_singleDim(kfun, cpu_out, lens_grid, blockDim_flat, lens_spans, 1);
}
{
cout << "## Benchmark 3d global read - inlined idxs - virtual (add/carry) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_global_read_3d_inlined_grid_span_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, blockDim_flat, virtual_grid, 1);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - cube load - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}*/
/*{
cout << "## Benchmark 3d big tile - inlined idxs - transaction aligned loads - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined_trx_align
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}*/
/*
{
cout << "## Benchmark 3d big tile - inlined idxs - forced coalesced flat load (div/rem) - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined_flat_forced_coalesced
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}
*/
/*{
cout << "## Benchmark 3d big tile - inlined idxs - cube reshape (div/rem) - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined_cube_reshape
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - flat load (div/rem) - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined_flat
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - flat load (div/rem) - singleDim grid ##";
Kernel3dPhysSingleDim kfun = big_tile_3d_inlined_flat_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_singleDim(kfun, cpu_out, virtual_grid_flat, blockDim_flat, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - flat load (add/carry) - singleDim grid ##";
Kernel3dPhysSingleDim kfun = big_tile_3d_inlined_flat_addcarry_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_singleDim(kfun, cpu_out, virtual_grid_flat, blockDim_flat, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - virtual (add/carry) - flat load (div/rem) - multiDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_big_tile_3d_inlined_flat_divrem_MultiDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - virtual (add/carry) - flat load (div/rem) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_big_tile_3d_inlined_flat_divrem_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d_flat, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - virtual (rem/div) - flat load (div/rem) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_divrem_big_tile_3d_inlined_flat_divrem_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d_flat, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - virtual (add/carry) - flat load (add/carry) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_big_tile_3d_inlined_flat_addcarry_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d_flat, virtual_grid, sh_mem_size_flat);
}*/
constexpr int strip_x = 1 << strip_pow_x;
constexpr int strip_y = 1 << strip_pow_y;
constexpr int strip_z = 1 << strip_pow_z;
constexpr int strip_size_x = group_size_x*strip_x;
constexpr int strip_size_y = group_size_y*strip_y;
constexpr int strip_size_z = group_size_z*strip_z;
constexpr int sh_x = strip_size_x + amax_x - amin_x;
constexpr int sh_y = strip_size_y + amax_y - amin_y;
constexpr int sh_z = strip_size_z + amax_z - amin_z;
constexpr int strip_sh_total = sh_x * sh_y * sh_z;
constexpr int strip_sh_total_mem_usage = strip_sh_total * sizeof(T);
//printf("shared memory used = %d B\n", strip_sh_total_mem_usage);
constexpr int max_shared_mem = 0xc000; // 48KiB
static_assert(strip_sh_total_mem_usage < max_shared_mem,
"Current configuration requires too much shared memory\n");
// this should technically be measured, but it is a division by (2^n), so it is very fast and won't matter much.
const int3 strip_grid = {
int(divUp(lens.x, long(strip_size_x))),
int(divUp(lens.y, long(strip_size_y))),
int(divUp(lens.z, long(strip_size_z)))};
const int strip_grid_flat = product(strip_grid);
{
//cout << "## Benchmark 3d big tile - inlined idxs - stripmined: ";
//printf("strip_size=[%d][%d][%d]f32 ", strip_size_z, strip_size_y, strip_size_x);
//cout << "- flat load (add/carry) - singleDim grid ##";
Kernel3dPhysSingleDim kfun = stripmine_big_tile_3d_inlined_flat_addcarry_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z
,strip_x,strip_y,strip_z
>;
G.do_run_singleDim(kfun, cpu_out, strip_grid_flat, blockDim_flat, strip_grid, strip_sh_total_mem_usage,false);
G.do_run_singleDim(kfun, cpu_out, strip_grid_flat, blockDim_flat, strip_grid, strip_sh_total_mem_usage);
}
/*{
cout << "## Benchmark 3d big tile - inlined idxs - stripmined: ";
printf("strip_size=[%d][%d][%d]f32 ", strip_size_z, strip_size_y, strip_size_x);
cout << "- cube loader - singleDim grid ##";
Kernel3dPhysSingleDim kfun = stripmine_big_tile_3d_inlined_cube_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z
,strip_x,strip_y,strip_z
>;
G.do_run_singleDim(kfun, cpu_out, strip_grid_flat, blockDim_flat, strip_grid, strip_sh_total_mem_usage);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - stripmined: ";
printf("strip_size=[%d][%d][%d]f32 ", strip_size_z, strip_size_y, strip_size_x);
cout << "- virtual (add/carry) - flat load (add/carry) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_stripmine_big_tile_3d_inlined_flat_addcarry_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z
,strip_x,strip_y,strip_z
>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d_flat, strip_grid, strip_sh_total_mem_usage);
}*/
}
free(cpu_out);
(void)block_3d;
(void)block_3d_flat;
(void)grid_3d;
(void)virtual_grid_spans;
(void)virtual_grid_flat;
(void)sh_mem_size_flat;
}
template
<const int gx, const int gy, const int gz
,const int sx, const int sy, const int sz>
__host__
void testStrips(const int physBlocks){
doTest_3D<-1,0, -1,0, -1,0, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,1, -1,0, -1,0, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,1, -1,1, -1,0, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,1, -1,0, -1,1, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,1, -1,1, -1,1, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,2, -1,1, -1,1, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,2, -1,2, -1,1, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,2, -1,1, -1,2, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,2, -1,2, -1,2, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,3, -1,2, -1,2, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,3, -1,3, -1,2, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,3, -1,2, -1,3, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,3, -1,3, -1,3, gx,gy,gz,sx,sy,sz>(physBlocks);
}
__host__
int main()
{
constexpr int gps_x = 32;
constexpr int gps_y = 4;
constexpr int gps_z = 2;
constexpr int gps_flat = gps_x * gps_y * gps_z;
int physBlocks = getPhysicalBlockCount<gps_flat>();
#ifdef Jacobi3D
cout << "running Jacobi 3D" << endl;
#else
cout << "running Dense stencil with mean" << endl;
#endif
// small test samples.
/*
doTest_3D<0,1,0,1,0,1, gps_x,gps_y,gps_z,1,1,1>(physBlocks);
doTest_3D<-1,1,0,1,0,1, gps_x,gps_y,gps_z,1,1,1>(physBlocks);
doTest_3D<-1,1,0,1,0,1, gps_x,gps_y,gps_z,1,1,1>(physBlocks);
// 0 < amin
doTest_3D<1,2,1,2,1,2, gps_x,gps_y,gps_z,0,1,2>(physBlocks);
// 0 > amax
doTest_3D<-2,-1,-2,-1,-2,-1, gps_x,gps_y,gps_z,0,1,2>(physBlocks);
// z axis is only in use
doTest_3D<-1,1,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
doTest_3D<-2,2,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
doTest_3D<-3,3,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
doTest_3D<-4,4,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
doTest_3D<-5,5,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
// z-y axis are only in use
doTest_3D<-1,1,-1,1,0,0, gps_x,gps_y,gps_z,0,1,1>(physBlocks);
doTest_3D<-2,2,-2,2,0,0, gps_x,gps_y,gps_z,0,1,1>(physBlocks);
doTest_3D<-3,3,-3,3,0,0, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-4,4,-4,4,0,0, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-5,5,-5,5,0,0, gps_x,gps_y,gps_z,0,0,0>(physBlocks);
// z-x axis are only in use
doTest_3D<-1,1,0,0,-1,1, gps_x,gps_y,gps_z,1,0,1>(physBlocks);
doTest_3D<-2,2,0,0,-2,2, gps_x,gps_y,gps_z,1,0,1>(physBlocks);
doTest_3D<-3,3,0,0,-3,3, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-4,4,0,0,-4,4, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-5,5,0,0,-5,5, gps_x,gps_y,gps_z,0,0,0>(physBlocks);
*/
// all axis are in use
doTest_3D<0,1,0,1,0,1, 32,2,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,2,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,2,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,2,2,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,8,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,8,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,8,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,8,2,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,8,4,0,0,0>(physBlocks);
//blocksize test
/*
doTest_3D<0,1,0,1,0,1, 32,8,1,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,8,1,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,8,1,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,8,1,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,16,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,16,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,16,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,16,2,0,0,0>(physBlocks);
*/
/*
doTest_3D<-2,2,-2,2,-2,2, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-3,3,-3,3,-3,3, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-4,4,-4,4,-4,4, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-5,5,-5,5,-5,5, gps_x,gps_y,gps_z,0,0,0>(physBlocks);
*/
constexpr int gx=32;
constexpr int gy=8;
constexpr int gz=4;
//printf("strip_size=[%d][%d][%d]f32 ", 0, 0, 0);
//testStrips<gx,gy,gz,0,0,0>(physBlocks);
//testStrips<gx,gy,gz,1,0,0>(physBlocks);
//testStrips<gx,gy,gz,0,1,0>(physBlocks);
//testStrips<gx,gy,gz,0,0,1>(physBlocks);
//testStrips<gx,gy,gz,1,0,1>(physBlocks);
//testStrips<gx,gy,gz,1,1,0>(physBlocks);
//testStrips<gx,gy,gz,0,1,1>(physBlocks);
//testStrips<gx,gy,gz,1,1,1>(physBlocks);
//testStrips<gx,gy,gz,0,1,1>(physBlocks);
//testStrips<gx,gy,gz,0,0,1>(physBlocks);
//testStrips<gx,gy,gz,1,0,2>(physBlocks);
//testStrips<gx,gy,gz,0,2,1>(physBlocks);
//testStrips<gx,gy,gz,1,2,0>(physBlocks);
//testStrips<gx,gy,gz,2,1,0>(physBlocks);
//testStrips<gx,gy,gz,2,0,1>(physBlocks);
return 0;
}
| 3ea60728fbf677ced1cd34382c19df025c2840dc.cu | #include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <iostream>
using namespace std;
using std::cout;
using std::endl;
#include "runners.h"
#include "kernels-3d.h"
static constexpr long3 lens = {
((1 << 8) + 2),
((1 << 8) + 4),
((1 << 8) + 8)};
static constexpr long lens_flat = lens.x * lens.y * lens.z;
static constexpr long n_runs = 100;
static Globs
<long3,int3
,Kernel3dVirtual
,Kernel3dPhysMultiDim
,Kernel3dPhysSingleDim
> G(lens, lens_flat, n_runs);
template<
const int amin_x, const int amin_y, const int amin_z,
const int amax_x, const int amax_y, const int amax_z>
__host__
void stencil_3d_cpu(
const T* start,
T* out)
{
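    // reference CPU implementation: for every output cell, gather the (border-clamped)
    // neighbourhood into a local array and apply the stencil function to it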
constexpr int3 range = {
amax_x - amin_x + 1,
amax_y - amin_y + 1,
amax_z - amin_z + 1};
constexpr int total_range = range.x * range.y * range.z;
const int max_x_idx = lens.x - 1;
const int max_y_idx = lens.y - 1;
const int max_z_idx = lens.z - 1;
for (int gidz = 0; gidz < lens.z; ++gidz){
for (int gidy = 0; gidy < lens.y; ++gidy){
for (int gidx = 0; gidx < lens.x; ++gidx){
T arr[total_range];
for(int i=0; i < range.z; i++){
for(int j=0; j < range.y; j++){
for(int k=0; k < range.x; k++){
const long z = bound<(amin_z<0),long>(gidz + (i + amin_z), max_z_idx);
const long y = bound<(amin_y<0),long>(gidy + (j + amin_y), max_y_idx);
const long x = bound<(amin_x<0),long>(gidx + (k + amin_x), max_x_idx);
const long index = (z*lens.y + y)*lens.x + x;
const int flat_idx = (i*range.y + j)*range.x + k;
arr[flat_idx] = start[index];
}
}
}
T lambda_res = stencil_fun_3d<amin_x, amin_y, amin_z, amax_x, amax_y, amax_z>(arr);
out[(gidz*lens.y + gidy)*lens.x + gidx] = lambda_res;
}
}
}
}
template<
const int amin_x, const int amin_y, const int amin_z,
const int amax_x, const int amax_y, const int amax_z>
__host__
void run_cpu_3d(T* cpu_out)
{
T* cpu_in = (T*)malloc(lens_flat*sizeof(T));
srand(1);
for (int i = 0; i < lens_flat; ++i)
{
cpu_in[i] = (T)rand();
}
struct timeval t_startpar, t_endpar, t_diffpar;
gettimeofday(&t_startpar, NULL);
{
stencil_3d_cpu<amin_x, amin_y, amin_z, amax_x, amax_y, amax_z>(cpu_in,cpu_out);
}
gettimeofday(&t_endpar, NULL);
timeval_subtract(&t_diffpar, &t_endpar, &t_startpar);
const unsigned long elapsed = (t_diffpar.tv_sec*1e6+t_diffpar.tv_usec) / 1000;
const unsigned long seconds = elapsed / 1000;
const unsigned long microseconds = elapsed % 1000;
printf("cpu c 3d for 1 run : %lu.%03lu seconds\n", seconds, microseconds);
free(cpu_in);
}
template<
const int amin_z, const int amax_z,
const int amin_y, const int amax_y,
const int amin_x, const int amax_x,
const int group_size_x, const int group_size_y, const int group_size_z,
const int strip_pow_x, const int strip_pow_y, const int strip_pow_z>
__host__
void doTest_3D(const int physBlocks)
{
static_assert(amin_z <= amax_z, "invalid setup");
static_assert(amin_y <= amax_y, "invalid setup");
static_assert(amin_x <= amax_x, "invalid setup");
const int z_range = (amax_z + 1) - amin_z;
const int y_range = (amax_y + 1) - amin_y;
const int x_range = (amax_x + 1) - amin_x;
#ifdef Jacobi3D
const int ixs_len = z_range + y_range + x_range - 2;
#else
const int ixs_len = z_range * y_range * x_range;
#endif
cout << "ixs[" << ixs_len << "] = (zr,yr,xr) = (" << amin_z << "..." << amax_z << ", " << amin_y << "..." << amax_y << ", " << amin_x << "..." << amax_x << ")\n";
constexpr long len = lens_flat;
T* cpu_out = (T*)malloc(len*sizeof(T));
run_cpu_3d<amin_x, amin_y, amin_z, amax_x, amax_y, amax_z>(cpu_out);
constexpr int blockDim_flat = group_size_x * group_size_y * group_size_z;
constexpr int3 virtual_grid = {
divUp((int)lens.x, group_size_x),
divUp((int)lens.y, group_size_y),
divUp((int)lens.z, group_size_z)};
constexpr dim3 block_3d(group_size_x,group_size_y,group_size_z);
constexpr dim3 block_3d_flat(group_size_x*group_size_y*group_size_z,1,1);
constexpr dim3 grid_3d(virtual_grid.x, virtual_grid.y, virtual_grid.z);
constexpr int virtual_grid_flat = virtual_grid.x * virtual_grid.y * virtual_grid.z;
constexpr int3 virtual_grid_spans = { 1, virtual_grid.x, virtual_grid.x * virtual_grid.y };
constexpr int sh_size_x = group_size_x + amax_x - amin_x;
constexpr int sh_size_y = group_size_y + amax_y - amin_y;
constexpr int sh_size_z = group_size_z + amax_z - amin_z;
constexpr int sh_size_flat = sh_size_x * sh_size_y * sh_size_z;
constexpr int sh_mem_size_flat = sh_size_flat * sizeof(T);
cout << "Blockdim z,y,x = " << group_size_z << ", " << group_size_y << ", " << group_size_x << endl;
//printf("virtual number of blocks = %d\n", virtual_grid_flat);
{
/*{
cout << "## Benchmark 3d global read - inlined ixs - multiDim grid ##";
Kernel3dPhysMultiDim kfun = global_reads_3d_inlined
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
for(int i=0;i<4;i++){
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, 1, false); // warmup as it is first kernel
}
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, 1);
}
{
cout << "## Benchmark 3d global read - inlined ixs - singleDim grid - grid span ##";
Kernel3dPhysSingleDim kfun = global_reads_3d_inlined_singleDim_gridSpan
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_singleDim(kfun, cpu_out, virtual_grid_flat, blockDim_flat, virtual_grid_spans, 1);
}
{
constexpr long lens_grid = divUp(lens_flat, long(blockDim_flat));
constexpr int3 lens_spans = { 1, int(lens.x), int(lens.x*lens.y) };
cout << "## Benchmark 3d global read - inlined ixs - singleDim grid - lens span ##";
Kernel3dPhysSingleDim kfun = global_reads_3d_inlined_singleDim_lensSpan
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_singleDim(kfun, cpu_out, lens_grid, blockDim_flat, lens_spans, 1);
}
{
cout << "## Benchmark 3d global read - inlined idxs - virtual (add/carry) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_global_read_3d_inlined_grid_span_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, blockDim_flat, virtual_grid, 1);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - cube load - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}*/
/*{
cout << "## Benchmark 3d big tile - inlined idxs - transaction aligned loads - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined_trx_align
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}*/
/*
{
cout << "## Benchmark 3d big tile - inlined idxs - forced coalesced flat load (div/rem) - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined_flat_forced_coalesced
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}
*/
/*{
cout << "## Benchmark 3d big tile - inlined idxs - cube reshape (div/rem) - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined_cube_reshape
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - flat load (div/rem) - multiDim grid ##";
Kernel3dPhysMultiDim kfun = big_tile_3d_inlined_flat
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_multiDim(kfun, cpu_out, grid_3d, block_3d, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - flat load (div/rem) - singleDim grid ##";
Kernel3dPhysSingleDim kfun = big_tile_3d_inlined_flat_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_singleDim(kfun, cpu_out, virtual_grid_flat, blockDim_flat, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - flat load (add/carry) - singleDim grid ##";
Kernel3dPhysSingleDim kfun = big_tile_3d_inlined_flat_addcarry_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_singleDim(kfun, cpu_out, virtual_grid_flat, blockDim_flat, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - virtual (add/carry) - flat load (div/rem) - multiDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_big_tile_3d_inlined_flat_divrem_MultiDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - virtual (add/carry) - flat load (div/rem) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_big_tile_3d_inlined_flat_divrem_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d_flat, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - virtual (rem/div) - flat load (div/rem) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_divrem_big_tile_3d_inlined_flat_divrem_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d_flat, virtual_grid, sh_mem_size_flat);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - virtual (add/carry) - flat load (add/carry) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_big_tile_3d_inlined_flat_addcarry_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d_flat, virtual_grid, sh_mem_size_flat);
}*/
constexpr int strip_x = 1 << strip_pow_x;
constexpr int strip_y = 1 << strip_pow_y;
constexpr int strip_z = 1 << strip_pow_z;
constexpr int strip_size_x = group_size_x*strip_x;
constexpr int strip_size_y = group_size_y*strip_y;
constexpr int strip_size_z = group_size_z*strip_z;
constexpr int sh_x = strip_size_x + amax_x - amin_x;
constexpr int sh_y = strip_size_y + amax_y - amin_y;
constexpr int sh_z = strip_size_z + amax_z - amin_z;
constexpr int strip_sh_total = sh_x * sh_y * sh_z;
constexpr int strip_sh_total_mem_usage = strip_sh_total * sizeof(T);
//printf("shared memory used = %d B\n", strip_sh_total_mem_usage);
constexpr int max_shared_mem = 0xc000; // 48KiB
static_assert(strip_sh_total_mem_usage < max_shared_mem,
"Current configuration requires too much shared memory\n");
// this should technically be measured, but it is a division by (2^n), so it is very fast and won't matter much.
const int3 strip_grid = {
int(divUp(lens.x, long(strip_size_x))),
int(divUp(lens.y, long(strip_size_y))),
int(divUp(lens.z, long(strip_size_z)))};
const int strip_grid_flat = product(strip_grid);
{
//cout << "## Benchmark 3d big tile - inlined idxs - stripmined: ";
//printf("strip_size=[%d][%d][%d]f32 ", strip_size_z, strip_size_y, strip_size_x);
//cout << "- flat load (add/carry) - singleDim grid ##";
Kernel3dPhysSingleDim kfun = stripmine_big_tile_3d_inlined_flat_addcarry_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z
,strip_x,strip_y,strip_z
>;
G.do_run_singleDim(kfun, cpu_out, strip_grid_flat, blockDim_flat, strip_grid, strip_sh_total_mem_usage,false);
G.do_run_singleDim(kfun, cpu_out, strip_grid_flat, blockDim_flat, strip_grid, strip_sh_total_mem_usage);
}
/*{
cout << "## Benchmark 3d big tile - inlined idxs - stripmined: ";
printf("strip_size=[%d][%d][%d]f32 ", strip_size_z, strip_size_y, strip_size_x);
cout << "- cube loader - singleDim grid ##";
Kernel3dPhysSingleDim kfun = stripmine_big_tile_3d_inlined_cube_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z
,strip_x,strip_y,strip_z
>;
G.do_run_singleDim(kfun, cpu_out, strip_grid_flat, blockDim_flat, strip_grid, strip_sh_total_mem_usage);
}
{
cout << "## Benchmark 3d big tile - inlined idxs - stripmined: ";
printf("strip_size=[%d][%d][%d]f32 ", strip_size_z, strip_size_y, strip_size_x);
cout << "- virtual (add/carry) - flat load (add/carry) - singleDim grid ##";
Kernel3dVirtual kfun = virtual_addcarry_stripmine_big_tile_3d_inlined_flat_addcarry_singleDim
<amin_x,amin_y,amin_z
,amax_x,amax_y,amax_z
,group_size_x,group_size_y,group_size_z
,strip_x,strip_y,strip_z
>;
G.do_run_virtual(kfun, cpu_out, physBlocks, block_3d_flat, strip_grid, strip_sh_total_mem_usage);
}*/
}
free(cpu_out);
(void)block_3d;
(void)block_3d_flat;
(void)grid_3d;
(void)virtual_grid_spans;
(void)virtual_grid_flat;
(void)sh_mem_size_flat;
}
template
<const int gx, const int gy, const int gz
,const int sx, const int sy, const int sz>
__host__
void testStrips(const int physBlocks){
doTest_3D<-1,0, -1,0, -1,0, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,1, -1,0, -1,0, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,1, -1,1, -1,0, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,1, -1,0, -1,1, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,1, -1,1, -1,1, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,2, -1,1, -1,1, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,2, -1,2, -1,1, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,2, -1,1, -1,2, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,2, -1,2, -1,2, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,3, -1,2, -1,2, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,3, -1,3, -1,2, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,3, -1,2, -1,3, gx,gy,gz,sx,sy,sz>(physBlocks);
doTest_3D<-1,3, -1,3, -1,3, gx,gy,gz,sx,sy,sz>(physBlocks);
}
__host__
int main()
{
constexpr int gps_x = 32;
constexpr int gps_y = 4;
constexpr int gps_z = 2;
constexpr int gps_flat = gps_x * gps_y * gps_z;
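    // physBlocks: how many blocks of gps_flat threads the device can keep resident at once;
    // used as the launch size of the 'virtual' grid kernels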
int physBlocks = getPhysicalBlockCount<gps_flat>();
#ifdef Jacobi3D
cout << "running Jacobi 3D" << endl;
#else
cout << "running Dense stencil with mean" << endl;
#endif
// small test samples.
/*
doTest_3D<0,1,0,1,0,1, gps_x,gps_y,gps_z,1,1,1>(physBlocks);
doTest_3D<-1,1,0,1,0,1, gps_x,gps_y,gps_z,1,1,1>(physBlocks);
doTest_3D<-1,1,0,1,0,1, gps_x,gps_y,gps_z,1,1,1>(physBlocks);
// 0 < amin
doTest_3D<1,2,1,2,1,2, gps_x,gps_y,gps_z,0,1,2>(physBlocks);
// 0 > amax
doTest_3D<-2,-1,-2,-1,-2,-1, gps_x,gps_y,gps_z,0,1,2>(physBlocks);
// only the z axis is in use
doTest_3D<-1,1,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
doTest_3D<-2,2,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
doTest_3D<-3,3,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
doTest_3D<-4,4,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
doTest_3D<-5,5,0,0,0,0, gps_x,gps_y,gps_z,0,0,3>(physBlocks);
// only the z and y axes are in use
doTest_3D<-1,1,-1,1,0,0, gps_x,gps_y,gps_z,0,1,1>(physBlocks);
doTest_3D<-2,2,-2,2,0,0, gps_x,gps_y,gps_z,0,1,1>(physBlocks);
doTest_3D<-3,3,-3,3,0,0, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-4,4,-4,4,0,0, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-5,5,-5,5,0,0, gps_x,gps_y,gps_z,0,0,0>(physBlocks);
// only the z and x axes are in use
doTest_3D<-1,1,0,0,-1,1, gps_x,gps_y,gps_z,1,0,1>(physBlocks);
doTest_3D<-2,2,0,0,-2,2, gps_x,gps_y,gps_z,1,0,1>(physBlocks);
doTest_3D<-3,3,0,0,-3,3, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-4,4,0,0,-4,4, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-5,5,0,0,-5,5, gps_x,gps_y,gps_z,0,0,0>(physBlocks);
*/
// all axes are in use
doTest_3D<0,1,0,1,0,1, 32,2,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,2,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,2,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,2,2,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,8,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,8,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,8,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,8,2,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,8,4,0,0,0>(physBlocks);
//blocksize test
/*
doTest_3D<0,1,0,1,0,1, 32,8,1,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,8,1,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,8,1,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,8,1,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,4,2,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,8,4,0,0,0>(physBlocks);
doTest_3D<0,1,0,1,0,1, 32,16,2,0,0,0>(physBlocks);
doTest_3D<-1,1,0,1,0,1, 32,16,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,0,1, 32,16,2,0,0,0>(physBlocks);
doTest_3D<-1,1,-1,1,-1,1, 32,16,2,0,0,0>(physBlocks);
*/
/*
doTest_3D<-2,2,-2,2,-2,2, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-3,3,-3,3,-3,3, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-4,4,-4,4,-4,4, gps_x,gps_y,gps_z,0,0,1>(physBlocks);
doTest_3D<-5,5,-5,5,-5,5, gps_x,gps_y,gps_z,0,0,0>(physBlocks);
*/
constexpr int gx=32;
constexpr int gy=8;
constexpr int gz=4;
//printf("strip_size=[%d][%d][%d]f32 ", 0, 0, 0);
//testStrips<gx,gy,gz,0,0,0>(physBlocks);
//testStrips<gx,gy,gz,1,0,0>(physBlocks);
//testStrips<gx,gy,gz,0,1,0>(physBlocks);
//testStrips<gx,gy,gz,0,0,1>(physBlocks);
//testStrips<gx,gy,gz,1,0,1>(physBlocks);
//testStrips<gx,gy,gz,1,1,0>(physBlocks);
//testStrips<gx,gy,gz,0,1,1>(physBlocks);
//testStrips<gx,gy,gz,1,1,1>(physBlocks);
//testStrips<gx,gy,gz,0,1,1>(physBlocks);
//testStrips<gx,gy,gz,0,0,1>(physBlocks);
//testStrips<gx,gy,gz,1,0,2>(physBlocks);
//testStrips<gx,gy,gz,0,2,1>(physBlocks);
//testStrips<gx,gy,gz,1,2,0>(physBlocks);
//testStrips<gx,gy,gz,2,1,0>(physBlocks);
//testStrips<gx,gy,gz,2,0,1>(physBlocks);
return 0;
}
|
dd1f8e6d83148d903b40867423820eb493bd79ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cuda_say_hi() {
printf("Hi, CUDA!\n");
} | dd1f8e6d83148d903b40867423820eb493bd79ae.cu | #include "includes.h"
__global__ void cuda_say_hi() {
printf("Hi, CUDA!\n");
} |
224062157872340c3e8fd6839056262c197c7fcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
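    // one thread per element; the bound check guards the final, possibly partial, block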
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < N)
C[i] = A[i] + B[i];
} | 224062157872340c3e8fd6839056262c197c7fcb.cu | #include "includes.h"
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
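    // one thread per element; the bound check guards the final, possibly partial, block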
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < N)
C[i] = A[i] + B[i];
} |
1765a52d7eda9115c60da872f1c9f51d1655c33d.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.cuh"
#include <sstream>
#include <fstream>
#include "helper_math.h"
#include "tsdf.cuh"
// parse a camera pose (translation + quaternion) into a 4x4 extrinsic matrix and return its inverse
cv::Mat parse_extrinsic(const std::vector<double>& list) {
cv::Vec3d axis{ list[3], list[4], list[5] };
double axis_norm = sqrt(axis[0] * axis[0] + axis[1] * axis[1] + axis[2] * axis[2]);
double theta = 2 * atan2(axis_norm, list[6]);
axis = axis / axis_norm;
cv::Mat rotation;
cv::Vec3d rod = theta * axis;
cv::Rodrigues(rod, rotation);
cv::Mat extrinsic = cv::Mat::eye(4, 4, CV_64F);
rotation.copyTo(extrinsic(cv::Rect(0, 0, 3, 3)));
cv::Mat translation(3, 1, CV_64F, (void*)list.data());
translation.copyTo(extrinsic(cv::Rect(3, 0, 1, 3)));
extrinsic.convertTo(extrinsic, CV_32F);
return extrinsic.inv();
}
cv::Mat inv_extrinsic(const cv::Mat& extrinsic) {
cv::Mat rotation = extrinsic(cv::Rect(0, 0, 3, 3));
cv::Mat translation = extrinsic(cv::Rect(3, 0, 1, 3));
cv::Mat result(3, 4, CV_64F);
cv::Mat rotation_inv = rotation.inv();
cv::Mat t_prime = -rotation_inv * translation;
rotation_inv.copyTo(result(cv::Rect(0, 0, 3, 3)));
t_prime.copyTo(result(cv::Rect(3, 0, 1, 3)));
std::cout << result << std::endl;
return result;
}
cv::Mat mult_extrinsic(const cv::Mat& extrinsic1, const cv::Mat& extrinsic2) {
cv::Mat result(3, 4, CV_64F);
result(cv::Rect(0, 0, 3, 3)) = extrinsic1(cv::Rect(0, 0, 3, 3)) * extrinsic2(cv::Rect(0, 0, 3, 3));
result(cv::Rect(3, 0, 1, 3)) = extrinsic1(cv::Rect(0, 0, 3, 3)) * extrinsic2(cv::Rect(3, 0, 1, 3)) + extrinsic1(cv::Rect(3, 0, 1, 3));
return result;
}
cv::Mat pack_tsdf_color(float* tsdf_ptr, uint8_t* color_ptr) {
cv::Mat color(4096, 4096, CV_8UC3, color_ptr);
cv::Mat tsdf(4096, 4096, CV_32FC1, tsdf_ptr);
cv::Mat result(color.rows, color.cols, CV_32FC4, cv::Scalar(0));
cv::Mat color_normed;
color.convertTo(color_normed, CV_32FC3, 1. / 255.);
cv::Mat colors[4];
cv::split(color_normed, colors);
colors[3] = tsdf;
cv::merge(colors, 4, result);
/*cv::Mat test[4];
cv::split(result, test);*/
//cv::imshow("test", color_normed);
//cv::waitKey(0);
return result;
}
std::map<double, std::vector<double> > read_trajactory(std::string filename) {
std::map<double, std::vector<double> > result;
std::string line;
std::ifstream infile(filename.c_str());
while (std::getline(infile, line))
{
std::istringstream iss(line);
double ts, tx, ty, tz, qx, qy, qz, qw;
if (!(iss >> ts >> tx >> ty >> tz >> qx >> qy >> qz >> qw)) continue;
std::vector<double> pos = { tx, ty, tz, qx, qy, qz, qw };
result.insert(std::make_pair(::fmod(ts, 1e5), pos));
}
return result;
}
float mean_depth(const cv::Mat& depth) {
int cnt = depth.rows * depth.cols;
uint16_t *ptr = (uint16_t*)depth.data;
double sum = 0;
int total = 0;
for (int i = 0; i < cnt; i++) {
if (ptr[i] == 0)
{
continue;
}
sum += ptr[i] / 5000.;
total++;
}
return static_cast<float>(sum / total);
}
template <typename T>
__device__ T mix(T a, T b, float interp) {
return (1 - interp) * a + interp * b;
}
__device__ float interp_tsdf_diff(const float3& pos, const float3& vol_start, const float3& voxel, const int3& vol_dim, float *tsdf_diff) {
float3 idx = (pos - vol_start) / voxel;
int3 floored_idx = make_int3(floorf(idx.x), floorf(idx.y), floorf(idx.z));
float3 frac_idx = idx - make_float3(floored_idx.x, floored_idx.y, floored_idx.z);
int base_idx = vol_dim.y * vol_dim.z * floored_idx.x + vol_dim.z * floored_idx.y + floored_idx.z;
float diffs[8];
for (uint8_t i = 0; i < 2; ++i)
{
for (uint8_t j = 0; j < 2; ++j)
{
for (uint8_t k = 0; k < 2; ++k)
{
int vol_idx = base_idx + vol_dim.y * vol_dim.z * i + vol_dim.z * j + k;
diffs[i * 4 + j * 2 + k] = tsdf_diff[vol_idx];
}
}
}
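    // trilinear interpolation: blend the eight corner samples along x, then y, then z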
float low = mix(mix(diffs[0], diffs[4], frac_idx.x), mix(diffs[2], diffs[6], frac_idx.x), frac_idx.y);
float high = mix(mix(diffs[1], diffs[5], frac_idx.x), mix(diffs[3], diffs[7], frac_idx.x), frac_idx.y);
return mix(low, high, frac_idx.z);
}
__device__ uchar3 interp_tsdf_color(const float3& pos, const float3& vol_start, const float3& voxel, const int3& vol_dim, uchar3 *tsdf_color) {
float3 idx = (pos - vol_start) / voxel;
int3 floored_idx = make_int3(floorf(idx.x), floorf(idx.y), floorf(idx.z));
float3 frac_idx = idx - make_float3(floored_idx.x, floored_idx.y, floored_idx.z);
int base_idx = vol_dim.y * vol_dim.z * floored_idx.x + vol_dim.z * floored_idx.y + floored_idx.z;
float3 colors[8];
for (uint8_t i = 0; i < 2; ++i)
{
for (uint8_t j = 0; j < 2; ++j)
{
for (uint8_t k = 0; k < 2; ++k)
{
int vol_idx = base_idx + vol_dim.y * vol_dim.z * i + vol_dim.z * j + k;
colors[i * 4 + j * 2 + k] = make_float3(tsdf_color[vol_idx].x, tsdf_color[vol_idx].y, tsdf_color[vol_idx].z);
}
}
}
float3 low = mix(mix(colors[0], colors[4], frac_idx.x), mix(colors[2], colors[6], frac_idx.x), frac_idx.y);
float3 high = mix(mix(colors[1], colors[5], frac_idx.x), mix(colors[3], colors[7], frac_idx.x), frac_idx.y);
float3 res = mix(low, high, frac_idx.z);
return make_uchar3(res.x, res.y, res.z);
}
__device__ void interp_tsdf_cnt(const float3& pos, const float3& vol_start, const float3& voxel, const int3& vol_dim, uint32_t *tsdf_cnt, float *out) {
float3 idx = (pos - vol_start) / voxel;
int3 floored_idx = make_int3(floorf(idx.x), floorf(idx.y), floorf(idx.z));
float3 frac_idx = idx - make_float3(floored_idx.x, floored_idx.y, floored_idx.z);
int base_idx = vol_dim.y * vol_dim.z * floored_idx.x + vol_dim.z * floored_idx.y + floored_idx.z;
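    // blend the MAX_OBJECTS per-voxel counters trilinearly, four at a time via packed uint4 loads and float4 math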
for (uint8_t m = 0; m < MAX_OBJECTS / 4; ++m)
{
float4 diffs[8];
for (uint8_t i = 0; i < 2; ++i)
{
for (uint8_t j = 0; j < 2; ++j)
{
for (uint8_t k = 0; k < 2; ++k)
{
int vol_idx = base_idx + vol_dim.y * vol_dim.z * i + vol_dim.z * j + k;
uint4 tmp = *(uint4*)&tsdf_cnt[vol_idx * MAX_OBJECTS + m * 4];
diffs[i * 4 + j * 2 + k] = make_float4(tmp.x, tmp.y, tmp.z, tmp.w);
}
}
}
float4 low = mix(mix(diffs[0], diffs[4], frac_idx.x), mix(diffs[2], diffs[6], frac_idx.x), frac_idx.y);
float4 high = mix(mix(diffs[1], diffs[5], frac_idx.x), mix(diffs[3], diffs[7], frac_idx.x), frac_idx.y);
*(float4*)&out[m * 4] = mix(low, high, frac_idx.z);
}
return;
}
| 1765a52d7eda9115c60da872f1c9f51d1655c33d.cu | #include "utils.cuh"
#include <sstream>
#include <fstream>
#include "helper_math.h"
#include "tsdf.cuh"
// parse a camera pose (translation + quaternion) into a 4x4 extrinsic matrix and return its inverse
cv::Mat parse_extrinsic(const std::vector<double>& list) {
cv::Vec3d axis{ list[3], list[4], list[5] };
double axis_norm = sqrt(axis[0] * axis[0] + axis[1] * axis[1] + axis[2] * axis[2]);
double theta = 2 * atan2(axis_norm, list[6]);
axis = axis / axis_norm;
cv::Mat rotation;
cv::Vec3d rod = theta * axis;
cv::Rodrigues(rod, rotation);
cv::Mat extrinsic = cv::Mat::eye(4, 4, CV_64F);
rotation.copyTo(extrinsic(cv::Rect(0, 0, 3, 3)));
cv::Mat translation(3, 1, CV_64F, (void*)list.data());
translation.copyTo(extrinsic(cv::Rect(3, 0, 1, 3)));
extrinsic.convertTo(extrinsic, CV_32F);
return extrinsic.inv();
}
cv::Mat inv_extrinsic(const cv::Mat& extrinsic) {
cv::Mat rotation = extrinsic(cv::Rect(0, 0, 3, 3));
cv::Mat translation = extrinsic(cv::Rect(3, 0, 1, 3));
cv::Mat result(3, 4, CV_64F);
cv::Mat rotation_inv = rotation.inv();
cv::Mat t_prime = -rotation_inv * translation;
rotation_inv.copyTo(result(cv::Rect(0, 0, 3, 3)));
t_prime.copyTo(result(cv::Rect(3, 0, 1, 3)));
std::cout << result << std::endl;
return result;
}
cv::Mat mult_extrinsic(const cv::Mat& extrinsic1, const cv::Mat& extrinsic2) {
cv::Mat result(3, 4, CV_64F);
result(cv::Rect(0, 0, 3, 3)) = extrinsic1(cv::Rect(0, 0, 3, 3)) * extrinsic2(cv::Rect(0, 0, 3, 3));
result(cv::Rect(3, 0, 1, 3)) = extrinsic1(cv::Rect(0, 0, 3, 3)) * extrinsic2(cv::Rect(3, 0, 1, 3)) + extrinsic1(cv::Rect(3, 0, 1, 3));
return result;
}
cv::Mat pack_tsdf_color(float* tsdf_ptr, uint8_t* color_ptr) {
cv::Mat color(4096, 4096, CV_8UC3, color_ptr);
cv::Mat tsdf(4096, 4096, CV_32FC1, tsdf_ptr);
cv::Mat result(color.rows, color.cols, CV_32FC4, cv::Scalar(0));
cv::Mat color_normed;
color.convertTo(color_normed, CV_32FC3, 1. / 255.);
cv::Mat colors[4];
cv::split(color_normed, colors);
colors[3] = tsdf;
cv::merge(colors, 4, result);
/*cv::Mat test[4];
cv::split(result, test);*/
//cv::imshow("test", color_normed);
//cv::waitKey(0);
return result;
}
std::map<double, std::vector<double> > read_trajactory(std::string filename) {
std::map<double, std::vector<double> > result;
std::string line;
std::ifstream infile(filename.c_str());
while (std::getline(infile, line))
{
std::istringstream iss(line);
double ts, tx, ty, tz, qx, qy, qz, qw;
if (!(iss >> ts >> tx >> ty >> tz >> qx >> qy >> qz >> qw)) continue;
std::vector<double> pos = { tx, ty, tz, qx, qy, qz, qw };
result.insert(std::make_pair(std::fmod(ts, 1e5), pos));
}
return result;
}
float mean_depth(const cv::Mat& depth) {
int cnt = depth.rows * depth.cols;
uint16_t *ptr = (uint16_t*)depth.data;
double sum = 0;
int total = 0;
for (int i = 0; i < cnt; i++) {
if (ptr[i] == 0)
{
continue;
}
sum += ptr[i] / 5000.;
total++;
}
return static_cast<float>(sum / total);
}
template <typename T>
__device__ T mix(T a, T b, float interp) {
return (1 - interp) * a + interp * b;
}
__device__ float interp_tsdf_diff(const float3& pos, const float3& vol_start, const float3& voxel, const int3& vol_dim, float *tsdf_diff) {
float3 idx = (pos - vol_start) / voxel;
int3 floored_idx = make_int3(floorf(idx.x), floorf(idx.y), floorf(idx.z));
float3 frac_idx = idx - make_float3(floored_idx.x, floored_idx.y, floored_idx.z);
int base_idx = vol_dim.y * vol_dim.z * floored_idx.x + vol_dim.z * floored_idx.y + floored_idx.z;
float diffs[8];
for (uint8_t i = 0; i < 2; ++i)
{
for (uint8_t j = 0; j < 2; ++j)
{
for (uint8_t k = 0; k < 2; ++k)
{
int vol_idx = base_idx + vol_dim.y * vol_dim.z * i + vol_dim.z * j + k;
diffs[i * 4 + j * 2 + k] = tsdf_diff[vol_idx];
}
}
}
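    // trilinear interpolation: blend the eight corner samples along x, then y, then z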
float low = mix(mix(diffs[0], diffs[4], frac_idx.x), mix(diffs[2], diffs[6], frac_idx.x), frac_idx.y);
float high = mix(mix(diffs[1], diffs[5], frac_idx.x), mix(diffs[3], diffs[7], frac_idx.x), frac_idx.y);
return mix(low, high, frac_idx.z);
}
__device__ uchar3 interp_tsdf_color(const float3& pos, const float3& vol_start, const float3& voxel, const int3& vol_dim, uchar3 *tsdf_color) {
float3 idx = (pos - vol_start) / voxel;
int3 floored_idx = make_int3(floorf(idx.x), floorf(idx.y), floorf(idx.z));
float3 frac_idx = idx - make_float3(floored_idx.x, floored_idx.y, floored_idx.z);
int base_idx = vol_dim.y * vol_dim.z * floored_idx.x + vol_dim.z * floored_idx.y + floored_idx.z;
float3 colors[8];
for (uint8_t i = 0; i < 2; ++i)
{
for (uint8_t j = 0; j < 2; ++j)
{
for (uint8_t k = 0; k < 2; ++k)
{
int vol_idx = base_idx + vol_dim.y * vol_dim.z * i + vol_dim.z * j + k;
colors[i * 4 + j * 2 + k] = make_float3(tsdf_color[vol_idx].x, tsdf_color[vol_idx].y, tsdf_color[vol_idx].z);
}
}
}
float3 low = mix(mix(colors[0], colors[4], frac_idx.x), mix(colors[2], colors[6], frac_idx.x), frac_idx.y);
float3 high = mix(mix(colors[1], colors[5], frac_idx.x), mix(colors[3], colors[7], frac_idx.x), frac_idx.y);
float3 res = mix(low, high, frac_idx.z);
return make_uchar3(res.x, res.y, res.z);
}
__device__ void interp_tsdf_cnt(const float3& pos, const float3& vol_start, const float3& voxel, const int3& vol_dim, uint32_t *tsdf_cnt, float *out) {
float3 idx = (pos - vol_start) / voxel;
int3 floored_idx = make_int3(floorf(idx.x), floorf(idx.y), floorf(idx.z));
float3 frac_idx = idx - make_float3(floored_idx.x, floored_idx.y, floored_idx.z);
int base_idx = vol_dim.y * vol_dim.z * floored_idx.x + vol_dim.z * floored_idx.y + floored_idx.z;
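    // blend the MAX_OBJECTS per-voxel counters trilinearly, four at a time via packed uint4 loads and float4 math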
for (uint8_t m = 0; m < MAX_OBJECTS / 4; ++m)
{
float4 diffs[8];
for (uint8_t i = 0; i < 2; ++i)
{
for (uint8_t j = 0; j < 2; ++j)
{
for (uint8_t k = 0; k < 2; ++k)
{
int vol_idx = base_idx + vol_dim.y * vol_dim.z * i + vol_dim.z * j + k;
uint4 tmp = *(uint4*)&tsdf_cnt[vol_idx * MAX_OBJECTS + m * 4];
diffs[i * 4 + j * 2 + k] = make_float4(tmp.x, tmp.y, tmp.z, tmp.w);
}
}
}
float4 low = mix(mix(diffs[0], diffs[4], frac_idx.x), mix(diffs[2], diffs[6], frac_idx.x), frac_idx.y);
float4 high = mix(mix(diffs[1], diffs[5], frac_idx.x), mix(diffs[3], diffs[7], frac_idx.x), frac_idx.y);
*(float4*)&out[m * 4] = mix(low, high, frac_idx.z);
}
return;
}
|
9e68bab5b5eff4a2cda5e2abc521adb41d0b75f1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "hip/hip_runtime.h"
#include "book.h"
#include "cpu_anim.h"
#define DIM 1024
#define PI 3.1415926535897932f
__global__ void kernel(unsigned char* ptr, int ticks) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy);
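    // radially symmetric damped cosine; the ticks term shifts the phase so the ripples expand over time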
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d / 10.0f - ticks / 7.0f) /
(d / 10.0f + 1.0f));
ptr[offset * 4 + 0] = grey;
ptr[offset * 4 + 1] = grey;
ptr[offset * 4 + 2] = grey;
ptr[offset * 4 + 3] = 255;
}
struct DataBlock {
unsigned char* dev_bitmap;
CPUAnimBitmap* bitmap;
};
void generate_frame(DataBlock* d, int ticks) {
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
kernel << <blocks, threads >> > (d->dev_bitmap, ticks);
HANDLE_ERROR(hipMemcpy(d->bitmap->get_ptr(),
d->dev_bitmap,
d->bitmap->image_size(),
hipMemcpyDeviceToHost));
}
// clean up memory allocated on the GPU
void cleanup(DataBlock* d) {
HANDLE_ERROR(hipFree(d->dev_bitmap));
}
int main(void) {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
HANDLE_ERROR(hipMalloc((void**)&data.dev_bitmap,
bitmap.image_size()));
bitmap.anim_and_exit((void (*)(void*, int))generate_frame,
(void (*)(void*))cleanup);
}
| 9e68bab5b5eff4a2cda5e2abc521adb41d0b75f1.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "cuda.h"
#include "book.h"
#include "cpu_anim.h"
#define DIM 1024
#define PI 3.1415926535897932f
__global__ void kernel(unsigned char* ptr, int ticks) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy);
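    // radially symmetric damped cosine; the ticks term shifts the phase so the ripples expand over time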
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d / 10.0f - ticks / 7.0f) /
(d / 10.0f + 1.0f));
ptr[offset * 4 + 0] = grey;
ptr[offset * 4 + 1] = grey;
ptr[offset * 4 + 2] = grey;
ptr[offset * 4 + 3] = 255;
}
struct DataBlock {
unsigned char* dev_bitmap;
CPUAnimBitmap* bitmap;
};
void generate_frame(DataBlock* d, int ticks) {
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
kernel << <blocks, threads >> > (d->dev_bitmap, ticks);
HANDLE_ERROR(cudaMemcpy(d->bitmap->get_ptr(),
d->dev_bitmap,
d->bitmap->image_size(),
cudaMemcpyDeviceToHost));
}
// clean up memory allocated on the GPU
void cleanup(DataBlock* d) {
HANDLE_ERROR(cudaFree(d->dev_bitmap));
}
int main(void) {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
HANDLE_ERROR(cudaMalloc((void**)&data.dev_bitmap,
bitmap.image_size()));
bitmap.anim_and_exit((void (*)(void*, int))generate_frame,
(void (*)(void*))cleanup);
}
|
13b5a01a8c74d84a36a30bd56df37dcb76a1906f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by GS <[email protected]> on 4/6/2018.
//
#include <array/ResultSet.h>
#include <ops/declarable/helpers/diag.h>
namespace sd {
namespace ops {
namespace helpers {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diag functor cuda kernel
// outputBuffer - output tensor buffer
// outputShape - output tensor shape
// inputBuffer - input tensor buffer - this tensor should be placed on diagonal position of output
// inputShape - input tensor shape
// inputLength - length for input tensor
//
template <typename T>
static __global__ void diagFunctorKernel(void* outputBuffer, const Nd4jLong* outputShape, void const* inputBuffer, const Nd4jLong* inputShape, Nd4jLong inputLength) {
__shared__ T *z;
__shared__ T const* x;
__shared__ Nd4jLong outputLength;
if (threadIdx.x == 0) {
z = reinterpret_cast<T*>(outputBuffer);
x = reinterpret_cast<T const*>(inputBuffer);
outputLength = shape::length(outputShape);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (int t = tid; t < inputLength; t += step) { // for all vals in input, put all on diagonal position to output
z[shape::getIndexOffset(t * (inputLength + 1), outputShape)] = x[shape::getIndexOffset(t, inputShape)]; //tX];
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diag part functor cuda kernel
// outputBuffer - output tensor buffer - linear sequence of diagonal values
// outputShape - output tensor shape
// inputBuffer - input tensor buffer - this tensor should be placed on diagonal position of output
// inputShape - input tensor shape
// outputLength - given length of output
// inputLength - given length for input tensor
//
template <typename T>
static __global__ void diagPartFunctorKernel(void* outputBuffer, const Nd4jLong* outputShape, void const* inputBuffer, const Nd4jLong* inputShape, Nd4jLong outputLength, Nd4jLong inputLength) {
__shared__ T *z;
__shared__ T const* x;
if (threadIdx.x == 0) {
z = reinterpret_cast<T*>(outputBuffer);
x = reinterpret_cast<T const*>(inputBuffer);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
    Nd4jLong i = tid * (outputLength + 1); // pos of this thread's first diagonal value (indexed by tid so every block reads the right entries)
    for (int t = tid; t < outputLength && i < inputLength; t += step) {    // loop by output, but input matrix may not be square
        // put diagonal val from input onto output
        z[shape::getIndexOffset(t, outputShape)] = x[shape::getIndexOffset(i, inputShape)];
        i += step * (outputLength + 1); // shift to this thread's next diagonal value, matching the grid-stride step
}
}
//////////////////////////////////////////////////////////////////////////
// Returns a batched matrix tensor with new batched diagonal values.
// for detailed explanations please take a look on web page: https://www.tensorflow.org/api_docs/python/tf/matrix_set_diag
template <typename T>
static void _diagFunctor(sd::LaunchContext * context, const NDArray* input, NDArray* output) {
auto stream = context->getCudaStream();
auto inputLength = input->lengthOf();
dim3 launchDims(256, 512, 8192);
if (!input->isActualOnDeviceSide())
input->syncToDevice();
hipLaunchKernelGGL(( diagFunctorKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, output->specialBuffer(), output->specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), inputLength);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diagFunctor - caller for diag functor processor
void diagFunctor(sd::LaunchContext * context, const NDArray* input, NDArray* output) {
auto xType = input->dataType();
BUILD_SINGLE_SELECTOR(xType, _diagFunctor, (context, input, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void _diagFunctor, (sd::LaunchContext * context, const NDArray* input, NDArray* output);, LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diagPartFunctor - caller for diag part functor kernel
template <typename T>
void _diagPartFunctor(sd::LaunchContext * context, NDArray const* input, NDArray* output) {
const int outLen = output->lengthOf();
const int inLen = input->lengthOf();
auto stream = context->getCudaStream();
dim3 launchDims(256, 512, 8192);
if (!input->isActualOnDeviceSide())
input->syncToDevice();
hipLaunchKernelGGL(( diagPartFunctorKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, output->specialBuffer(), output->specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), outLen, inLen);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diagPartFunctor - caller for diag part functor processor
void diagPartFunctor(sd::LaunchContext * context, NDArray const* input, NDArray* output) {
auto zType = output->dataType();
BUILD_SINGLE_SELECTOR(zType, _diagPartFunctor, (context, input, output), NUMERIC_TYPES);
}
}
}
} | 13b5a01a8c74d84a36a30bd56df37dcb76a1906f.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by GS <[email protected]> on 4/6/2018.
//
#include <array/ResultSet.h>
#include <ops/declarable/helpers/diag.h>
namespace sd {
namespace ops {
namespace helpers {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diag functor cuda kernel
// outputBuffer - output tensor buffer
// outputShape - output tensor shape
// inputBuffer - input tensor buffer - this tensor should be placed on diagonal position of output
// inputShape - input tensor shape
// inputLength - length for input tensor
//
template <typename T>
static __global__ void diagFunctorKernel(void* outputBuffer, const Nd4jLong* outputShape, void const* inputBuffer, const Nd4jLong* inputShape, Nd4jLong inputLength) {
__shared__ T *z;
__shared__ T const* x;
__shared__ Nd4jLong outputLength;
if (threadIdx.x == 0) {
z = reinterpret_cast<T*>(outputBuffer);
x = reinterpret_cast<T const*>(inputBuffer);
outputLength = shape::length(outputShape);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (int t = tid; t < inputLength; t += step) { // for all vals in input, put all on diagonal position to output
z[shape::getIndexOffset(t * (inputLength + 1), outputShape)] = x[shape::getIndexOffset(t, inputShape)]; //tX];
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diag part functor cuda kernel
// outputBuffer - output tensor buffer - linear sequence of diagonal values
// outputShape - output tensor shape
// inputBuffer - input tensor buffer - this tensor should be placed on diagonal position of output
// inputShape - input tensor shape
// outputLength - given length of output
// inputLength - given length for input tensor
//
template <typename T>
static __global__ void diagPartFunctorKernel(void* outputBuffer, const Nd4jLong* outputShape, void const* inputBuffer, const Nd4jLong* inputShape, Nd4jLong outputLength, Nd4jLong inputLength) {
__shared__ T *z;
__shared__ T const* x;
if (threadIdx.x == 0) {
z = reinterpret_cast<T*>(outputBuffer);
x = reinterpret_cast<T const*>(inputBuffer);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
    Nd4jLong i = tid * (outputLength + 1); // pos of this thread's first diagonal value (indexed by tid so every block reads the right entries)
    for (int t = tid; t < outputLength && i < inputLength; t += step) {    // loop by output, but input matrix may not be square
        // put diagonal val from input onto output
        z[shape::getIndexOffset(t, outputShape)] = x[shape::getIndexOffset(i, inputShape)];
        i += step * (outputLength + 1); // shift to this thread's next diagonal value, matching the grid-stride step
}
}
//////////////////////////////////////////////////////////////////////////
// Returns a batched matrix tensor with new batched diagonal values.
// for detailed explanations please take a look on web page: https://www.tensorflow.org/api_docs/python/tf/matrix_set_diag
template <typename T>
static void _diagFunctor(sd::LaunchContext * context, const NDArray* input, NDArray* output) {
auto stream = context->getCudaStream();
auto inputLength = input->lengthOf();
dim3 launchDims(256, 512, 8192);
if (!input->isActualOnDeviceSide())
input->syncToDevice();
diagFunctorKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(output->specialBuffer(), output->specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), inputLength);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diagFunctor - caller for diag functor processor
void diagFunctor(sd::LaunchContext * context, const NDArray* input, NDArray* output) {
auto xType = input->dataType();
BUILD_SINGLE_SELECTOR(xType, _diagFunctor, (context, input, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void _diagFunctor, (sd::LaunchContext * context, const NDArray* input, NDArray* output);, LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diagPartFunctor - caller for diag part functor kernel
template <typename T>
void _diagPartFunctor(sd::LaunchContext * context, NDArray const* input, NDArray* output) {
const int outLen = output->lengthOf();
const int inLen = input->lengthOf();
auto stream = context->getCudaStream();
dim3 launchDims(256, 512, 8192);
if (!input->isActualOnDeviceSide())
input->syncToDevice();
diagPartFunctorKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(output->specialBuffer(), output->specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), outLen, inLen);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// diagPartFunctor - caller for diag part functor processor
void diagPartFunctor(sd::LaunchContext * context, NDArray const* input, NDArray* output) {
auto zType = output->dataType();
BUILD_SINGLE_SELECTOR(zType, _diagPartFunctor, (context, input, output), NUMERIC_TYPES);
}
}
}
} |
73bf271fa9b1e5b545434baf550ed3f3b9f14e2e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
__global__ void matmul_kernel(const float* A, const float* B, float* C, unsigned int n) {
int blocksize = blockDim.x;
extern __shared__ float shared_arr[];
float *As = shared_arr;
float *Bs = (float*)&As[blocksize*blocksize];
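    // dynamic shared memory holds two blocksize x blocksize tiles: As caches a tile of A, Bs a tile of B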
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = by * blocksize * n;
int aEnd = aBegin + n-1;
int aStep = blocksize;
int arow = aBegin/n + ty;
int bBegin = bx * blocksize;
int bStep = n * blocksize;
int bcol = bBegin + tx;
int c = n * blocksize * by + blocksize * bx;
float Csub = 0;
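    // march the tiles: A advances along its block row, B down its block column; each pass accumulates one tile product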
for (int a = aBegin, b = bBegin;a <= aEnd;a += aStep, b += bStep) {
As[blocksize*ty + tx] = ((arow < n) && (a + tx <= aEnd))? A[a + n * ty + tx] : 0;
Bs[blocksize*ty + tx] = ((bcol < n) && (a + tx <= aEnd))? B[b + n * ty + tx] : 0;
//The aEnd bound is also used to zero-pad the B tile, since the column dimension of A and the row dimension of B need to be the same
__syncthreads();
for (int k = 0; k < blocksize; ++k)
Csub += As[blocksize*ty + k] * Bs[blocksize*k + tx];
__syncthreads();
}
if((by*blocksize + ty < n) && (bx*blocksize + tx < n)) {
C[c + n * ty + tx] = Csub;
}
}
__host__ void matmul(const float* A, const float* B, float* C, unsigned int n, unsigned int block_dim) {
dim3 dimBlock(block_dim, block_dim);
dim3 dimGrid( (n + block_dim-1)/block_dim , (n + block_dim-1)/block_dim );
size_t shared_array_size = (2*block_dim*block_dim)*sizeof(float);
hipLaunchKernelGGL(( matmul_kernel), dim3(dimGrid), dim3(dimBlock), shared_array_size, 0, A, B, C, n);
hipDeviceSynchronize();
}
| 73bf271fa9b1e5b545434baf550ed3f3b9f14e2e.cu | #include <iostream>
#include <cuda.h>
using namespace std;
__global__ void matmul_kernel(const float* A, const float* B, float* C, unsigned int n) {
int blocksize = blockDim.x;
extern __shared__ float shared_arr[];
float *As = shared_arr;
float *Bs = (float*)&As[blocksize*blocksize];
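    // dynamic shared memory holds two blocksize x blocksize tiles: As caches a tile of A, Bs a tile of B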
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = by * blocksize * n;
int aEnd = aBegin + n-1;
int aStep = blocksize;
int arow = aBegin/n + ty;
int bBegin = bx * blocksize;
int bStep = n * blocksize;
int bcol = bBegin + tx;
int c = n * blocksize * by + blocksize * bx;
float Csub = 0;
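    // march the tiles: A advances along its block row, B down its block column; each pass accumulates one tile product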
for (int a = aBegin, b = bBegin;a <= aEnd;a += aStep, b += bStep) {
As[blocksize*ty + tx] = ((arow < n) && (a + tx <= aEnd))? A[a + n * ty + tx] : 0;
Bs[blocksize*ty + tx] = ((bcol < n) && (a + tx <= aEnd))? B[b + n * ty + tx] : 0;
//The aEnd bound is also used to zero-pad the B tile, since the column dimension of A and the row dimension of B need to be the same
__syncthreads();
for (int k = 0; k < blocksize; ++k)
Csub += As[blocksize*ty + k] * Bs[blocksize*k + tx];
__syncthreads();
}
if((by*blocksize + ty < n) && (bx*blocksize + tx < n)) {
C[c + n * ty + tx] = Csub;
}
}
__host__ void matmul(const float* A, const float* B, float* C, unsigned int n, unsigned int block_dim) {
dim3 dimBlock(block_dim, block_dim);
dim3 dimGrid( (n + block_dim-1)/block_dim , (n + block_dim-1)/block_dim );
size_t shared_array_size = (2*block_dim*block_dim)*sizeof(float);
matmul_kernel<<<dimGrid, dimBlock, shared_array_size>>>(A, B, C, n);
cudaDeviceSynchronize();
}
|
3676e8b4c00978d6aade499cca406ef5b2dde325.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * Université Pierre et Marie Curie
 * Neutron transport computation
 * Sequential version
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define OUTPUT_FILE "/tmp/absorbed.dat"
#define NB_BLOCK 256
#define NB_THREAD 256
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
// Counters declared in GPU global memory
__device__ int device_r;
__device__ int device_b;
__device__ int device_t;
char info[] = "\
Usage:\n\
neutron-seq H Nb C_c C_s\n\
\n\
    H  : thickness of the plate\n\
    Nb : number of samples\n\
    C_c: absorbing component\n\
    C_s: scattering component\n\
\n\
Example run: \n\
neutron-seq 1.0 500000000 0.5 0.5\n\
";
__global__ void setup_kernel(hiprandState_t *state){
int idx = threadIdx.x+blockDim.x*blockIdx.x;
  // Initialise each generator with a different seed
hiprand_init(idx, 0, 0, &state[idx]);
  /* Alternative: initialise every generator with the same seed but a different sequence;
     the generators still produce different numbers because consecutive sequences are 2^67 draws apart */
// hiprand_init(666, idx, 0, &state[idx]);
}
/*
 * our gettimeofday() wrapper
*/
double my_gettimeofday(){
struct timeval tmp_time;
gettimeofday(&tmp_time, NULL);
return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}
__global__ void neutron_gpu(hiprandState_t *state, float h, int n, float c_c, float c_s, float *result)
{
  // number of reflected, absorbed and transmitted neutrons
  int r, b, t;
  r = b = t = 0;
  // per-thread partial counts staged in shared memory
  __shared__ int R[NB_THREAD];
  __shared__ int B[NB_THREAD];
  __shared__ int T[NB_THREAD];
  float c;
  c = c_c + c_s;
  // distance travelled by the neutron before the collision
  float L;
  // direction of the neutron (0 <= d <= PI)
  float d;
  // uniform random variable
  float u;
  // position of the particle (0 <= x <= h)
  float x;
  int idx;
  idx = threadIdx.x + blockIdx.x * blockDim.x;
  // copy the generator state into a register for efficiency
  hiprandState_t localState = state[idx];
  /* GPU code: grid-stride loop, each thread simulates several neutrons */
while(idx < n)
{
d = 0.0;
x = 0.0;
while(1)
{
u = hiprand_uniform(&localState);
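      // draw the free path from an exponential distribution of mean 1/c (inverse-CDF of the uniform draw)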
L = -(1 / c) * log(u);
x = x + L * cos(d);
if (x < 0)
{
r++;
break;
}
else if (x >= h)
{
t++;
break;
}
else if ((u = hiprand_uniform(&localState)) < c_c / c)
{
b++;
result[idx] = x;
break;
}
else
{
u = hiprand_uniform(&localState);
d = u * M_PI;
}
}
idx+= blockDim.x * gridDim.x;
}
  // Store each thread's r, b, t into the shared arrays
  R[threadIdx.x] = r;
  B[threadIdx.x] = b;
  T[threadIdx.x] = t;
  // Synchronise before computing the block-wide sums
  __syncthreads();
  // Tree reduction of the arrays
for(unsigned int s = blockDim.x/2; s > 0; s = s/2)
{
if(threadIdx.x < s)
{
R[threadIdx.x] += R[threadIdx.x + s];
B[threadIdx.x] += B[threadIdx.x + s];
T[threadIdx.x] += T[threadIdx.x + s];
}
__syncthreads();
}
  // Only thread 0 of each block adds the block totals to the global counters
if(threadIdx.x == 0)
{
atomicAdd(&device_r,R[0]);
atomicAdd(&device_b,B[0]);
atomicAdd(&device_t,T[0]);
}
}
/*
* main()
*/
int main(int argc, char *argv[]) {
  // The mean distance between neutron/atom interactions is 1/c.
  // c_c and c_s are the absorbing and scattering components of c.
  float c_c, c_s;
  // thickness of the plate
  float h;
  // number of samples
  int n;
  // timing
hipEvent_t start, finish;
hipEventCreate(&start);
hipEventCreate(&finish);
if( argc == 1)
fprintf( stderr, "%s\n", info);
  // default values
h = 1.0;
n = 500000000;
c_c = 0.5;
c_s = 0.5;
  // read the parameters
if (argc > 1)
h = atof(argv[1]);
if (argc > 2)
n = atoi(argv[2]);
if (argc > 3)
c_c = atof(argv[3]);
if (argc > 4)
c_s = atof(argv[4]);
  // print the parameters for verification
  printf("Thickness of the plate : %4.g\n", h);
  printf("Number of samples      : %d\n", n);
printf("C_c : %g\n", c_c);
printf("C_s : %g\n", c_s);
  // Host-side allocation of the result array
  float *host_absorbed;
  host_absorbed = (float *) calloc(n, sizeof(float));
  int r,b,t;
  // Device-side allocation of the result array
  float *device_absorbed;
  hipMalloc((void **)&device_absorbed, n*sizeof(float));
  hipMemset(device_absorbed,0,n*sizeof(float));
  // Host-side allocation of the array of pseudo-random generator states
hiprandState_t *d_state;
CUDA_CALL(hipMalloc((void **)&d_state, NB_BLOCK*NB_THREAD*sizeof(hiprandState_t)));
  // start timing
  hipEventRecord(start, 0);
  // initialise the generators
hipLaunchKernelGGL(( setup_kernel), dim3(NB_BLOCK),dim3(NB_THREAD), 0, 0, d_state);
hipLaunchKernelGGL(( neutron_gpu), dim3(NB_BLOCK),dim3(NB_THREAD), 0, 0, d_state, h, n, c_c, c_s, device_absorbed);
hipMemcpy(host_absorbed,device_absorbed,n*sizeof(float),hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&r, device_r, sizeof(int),0);
hipMemcpyFromSymbol(&b, device_b, sizeof(int),0);
hipMemcpyFromSymbol(&t, device_t, sizeof(int),0);
  // stop timing
hipEventRecord(finish, 0);
hipEventSynchronize(finish);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, finish);
printf("r=%d, b=%d, t=%d\n",r,b,t);
printf("\nPourcentage des neutrons reflchis : %4.2g\n", (float) r / (float) n);
printf("Pourcentage des neutrons absorbs : %4.2g\n", (float) b / (float) n);
printf("Pourcentage des neutrons transmis : %4.2g\n", (float) t / (float) n);
printf("\nTemps total de calcul: %.8g sec\n", elapsedTime/1000.0);
printf("Millions de neutrons /s: %.2g\n", (double) n / ((elapsedTime/1000.0)*1e6));
  // open the output file for the positions of the absorbed neutrons
FILE *f_handle = fopen(OUTPUT_FILE, "w");
if (!f_handle) {
fprintf(stderr, "Cannot open " OUTPUT_FILE "\n");
exit(EXIT_FAILURE);
}
for (int j = 0; j < b; j++)
fprintf(f_handle, "%f\n", host_absorbed[j]);
  // close the file
fclose(f_handle);
printf("Result written in " OUTPUT_FILE "\n");
hipFree(d_state);
hipFree(device_absorbed);
free(host_absorbed);
return EXIT_SUCCESS;
}
| 3676e8b4c00978d6aade499cca406ef5b2dde325.cu | /*
* Université Pierre et Marie Curie
 * Neutron transport computation
 * Sequential version
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define OUTPUT_FILE "/tmp/absorbed.dat"
#define NB_BLOCK 256
#define NB_THREAD 256
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
// Counters declared in GPU global memory
__device__ int device_r;
__device__ int device_b;
__device__ int device_t;
char info[] = "\
Usage:\n\
neutron-seq H Nb C_c C_s\n\
\n\
H  : plate thickness\n\
Nb : number of samples\n\
C_c: absorbing component\n\
C_s: scattering component\n\
\n\
Example run: \n\
neutron-seq 1.0 500000000 0.5 0.5\n\
";
__global__ void setup_kernel(curandState *state){
int idx = threadIdx.x+blockDim.x*blockIdx.x;
// Initialize each generator with a different seed
curand_init(idx, 0, 0, &state[idx]);
/* Alternative: initialize every generator with the same seed but a different
   sequence number; the generators still produce distinct streams because
   consecutive sequences are separated by 2^67 draws */
// curand_init(666, idx, 0, &state[idx]);
}
/*
 * our own gettimeofday()
*/
double my_gettimeofday(){
struct timeval tmp_time;
gettimeofday(&tmp_time, NULL);
return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}
__global__ void neutron_gpu(curandState *state, float h, int n, float c_c, float c_s, float *result)
{
// number of reflected, absorbed and transmitted neutrons
int r, b, t;
r = b = t = 0;
// Arrays each thread writes its partial counts into
__shared__ int R[NB_THREAD];
__shared__ int B[NB_THREAD];
__shared__ int T[NB_THREAD];
float c;
c = c_c + c_s;
// distance travelled by the neutron before the collision
float L;
// direction of the neutron (0 <= d <= PI)
float d;
// uniform random variable
float u;
// position of the particle (0 <= x <= h)
float x;
int idx;
idx = threadIdx.x + blockIdx.x * blockDim.x;
// Copy the generator state into registers for efficiency
curandState localState = state[idx];
/* GPU code */
while(idx < n)
{
d = 0.0;
x = 0.0;
while(1)
{
u = curand_uniform(&localState);
L = -(1 / c) * log(u);
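// Inverse-CDF sampling of the exponential free path: for u ~ U(0,1),
// L = -ln(u)/c satisfies P(L < l) = 1 - exp(-c*l), i.e. mean free path 1/c.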
x = x + L * cos(d);
if (x < 0)
{
r++;
break;
}
else if (x >= h)
{
t++;
break;
}
else if ((u = curand_uniform(&localState)) < c_c / c)
{
b++;
result[idx] = x;
break;
}
else
{
u = curand_uniform(&localState);
d = u * M_PI;
}
}
idx+= blockDim.x * gridDim.x;
}
// Store r, b, t into the shared arrays
R[threadIdx.x] = r;
B[threadIdx.x] = b;
T[threadIdx.x] = t;
// Synchronize before one thread computes the total sum
__syncthreads();
// Tree reduction of the arrays
for(unsigned int s = blockDim.x/2; s > 0; s = s/2)
{
if(threadIdx.x < s)
{
R[threadIdx.x] += R[threadIdx.x + s];
B[threadIdx.x] += B[threadIdx.x + s];
T[threadIdx.x] += T[threadIdx.x + s];
}
__syncthreads();
}
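/* Worked example of the halving reduction above, for blockDim.x = 8:
     s=4: R[0]+=R[4], R[1]+=R[5], R[2]+=R[6], R[3]+=R[7]
     s=2: R[0]+=R[2], R[1]+=R[3]
     s=1: R[0]+=R[1]  -> R[0] now holds the block total.
   This scheme assumes blockDim.x is a power of two (NB_THREAD = 256 is). */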
// Only thread 0 of each block adds its block's partial sums to the global totals
if(threadIdx.x == 0)
{
atomicAdd(&device_r,R[0]);
atomicAdd(&device_b,B[0]);
atomicAdd(&device_t,T[0]);
}
}
/*
* main()
*/
int main(int argc, char *argv[]) {
// The mean distance between neutron/atom interactions is 1/c.
// c_c and c_s are the absorbing and scattering components of c.
float c_c, c_s;
// plate thickness
float h;
// number of samples
int n;
// timing
cudaEvent_t start, finish;
cudaEventCreate(&start);
cudaEventCreate(&finish);
if( argc == 1)
fprintf( stderr, "%s\n", info);
// default values
h = 1.0;
n = 500000000;
c_c = 0.5;
c_s = 0.5;
// read the parameters
if (argc > 1)
h = atof(argv[1]);
if (argc > 2)
n = atoi(argv[2]);
if (argc > 3)
c_c = atof(argv[3]);
if (argc > 4)
c_s = atof(argv[4]);
// print the parameters for verification
printf("Plate thickness   : %4.g\n", h);
printf("Number of samples : %d\n", n);
printf("C_c : %g\n", c_c);
printf("C_s : %g\n", c_s);
// Allocate memory for the result on the CPU side
float *host_absorbed;
host_absorbed = (float *) calloc(n, sizeof(float));
int r,b,t;
// Allocate memory for the result on the GPU side
float *device_absorbed;
cudaMalloc((void **)&device_absorbed, n*sizeof(float));
cudaMemset(device_absorbed,0,n*sizeof(float));
// CPU-side allocation of the array of pseudo-random generator states
curandState *d_state;
CUDA_CALL(cudaMalloc((void **)&d_state, NB_BLOCK*NB_THREAD*sizeof(curandState)));
// start the timer
cudaEventRecord(start, 0);
// Initialize the generators
setup_kernel<<<NB_BLOCK,NB_THREAD>>>(d_state);
neutron_gpu<<<NB_BLOCK,NB_THREAD>>>(d_state, h, n, c_c, c_s, device_absorbed);
cudaMemcpy(host_absorbed,device_absorbed,n*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&r, device_r, sizeof(int),0);
cudaMemcpyFromSymbol(&b, device_b, sizeof(int),0);
cudaMemcpyFromSymbol(&t, device_t, sizeof(int),0);
// stop the timer
cudaEventRecord(finish, 0);
cudaEventSynchronize(finish);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, finish);
printf("r=%d, b=%d, t=%d\n",r,b,t);
printf("\nPourcentage des neutrons refléchis : %4.2g\n", (float) r / (float) n);
printf("Pourcentage des neutrons absorbés : %4.2g\n", (float) b / (float) n);
printf("Pourcentage des neutrons transmis : %4.2g\n", (float) t / (float) n);
printf("\nTemps total de calcul: %.8g sec\n", elapsedTime/1000.0);
printf("Millions de neutrons /s: %.2g\n", (double) n / ((elapsedTime/1000.0)*1e6));
// ouverture du fichier pour ecrire les positions des neutrons absorbés
FILE *f_handle = fopen(OUTPUT_FILE, "w");
if (!f_handle) {
fprintf(stderr, "Cannot open " OUTPUT_FILE "\n");
exit(EXIT_FAILURE);
}
for (int j = 0; j < b; j++)
fprintf(f_handle, "%f\n", host_absorbed[j]);
// close the file
fclose(f_handle);
printf("Result written in " OUTPUT_FILE "\n");
cudaFree(d_state);
cudaFree(device_absorbed);
free(host_absorbed);
return EXIT_SUCCESS;
}
|
73fed613369d4f91abf95f6fcb9a950053cd7364.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
printf("Result %d ", *c);
} | 73fed613369d4f91abf95f6fcb9a950053cd7364.cu | #include "includes.h"
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
printf("Result %d ", *c);
} |
4eb9fe605af36e75ba5db87f743e1da9d9bfbf40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream> // Needed to perform IO operations
using namespace std;
#define N 100000
__global__ void add(int n, int *a, int *b, int *c) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
for (int i = index; i < n; i += stride) {
c[i] = a[i] + b[i];
}
}
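// Note on the grid-stride loop above: with the launch used in main() below
// (N = 100000, blockSize = 256, numBlocks = 391), the stride is
// 256 * 391 = 100096 >= N, so each thread runs the body at most once; the
// loop form simply keeps the kernel correct for any smaller grid as well.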
int main(void) {
int blockSize = 256;
int numBlocks = (N + blockSize -1) / blockSize;
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc((void**)&dev_a, N*sizeof(int));
hipMalloc((void**)&dev_b, N*sizeof(int));
hipMalloc((void**)&dev_c, N*sizeof(int));
for (int i=0; i<N; i++) {
a[i] = -i;
b[i] = i*i;
}
hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, dev_a, dev_b, dev_c);
hipDeviceSynchronize();
hipMemcpy(c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | 4eb9fe605af36e75ba5db87f743e1da9d9bfbf40.cu | #include <iostream> // Needed to perform IO operations
using namespace std;
#define N 100000
__global__ void add(int n, int *a, int *b, int *c) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
for (int i = index; i < n; i += stride) {
c[i] = a[i] + b[i];
}
}
int main(void) {
int blockSize = 256;
int numBlocks = (N + blockSize -1) / blockSize;
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void**)&dev_a, N*sizeof(int));
cudaMalloc((void**)&dev_b, N*sizeof(int));
cudaMalloc((void**)&dev_c, N*sizeof(int));
for (int i=0; i<N; i++) {
a[i] = -i;
b[i] = i*i;
}
cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
add<<<numBlocks, blockSize>>>(N, dev_a, dev_b, dev_c);
cudaDeviceSynchronize();
cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} |
137dd9a20da342c90ad7b39a0194e91e2f5780f5.hip | // !!! This is a file automatically generated by hipify!!!
/* ----------------------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* ----------------------------------------------------------------------------
* Based on work by Donald Wilcox @ LBNL
* ----------------------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* ----------------------------------------------------------------------------
* Implementation file for cuSolverSp batched QR SUNLinearSolver interface.
* ----------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include <sunlinsol/sunlinsol_cusolversp_batchqr.h>
#include "sundials_cuda.h"
#include "sundials_debug.h"
#define ZERO RCONST(0.0)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
/* macros for handling the different function names based on precision */
#if defined(SUNDIALS_DOUBLE_PRECISION)
#define _cusolverSpXcsrqrBufferInfoBatched cusolverSpDcsrqrBufferInfoBatched
#define _cusolverSpXcsrqrsvBatched cusolverSpDcsrqrsvBatched
#elif defined(SUNDIALS_SINGLE_PRECISION)
#define _cusolverSpXcsrqrBufferInfoBatched cusolverSpScsrqrBufferInfoBatched
#define _cusolverSpXcsrqrsvBatched cusolverSpScsrqrsvBatched
#endif
/*
* -----------------------------------------------------------------
* cuSolverSp solver structure accessibility macros:
* -----------------------------------------------------------------
*/
#define SUN_CUSP_CONTENT(S) ( (SUNLinearSolverContent_cuSolverSp_batchQR)(S->content) )
#define SUN_CUSP_QRWORKSPACE(S) ( SUN_CUSP_CONTENT(S)->workspace )
#define SUN_CUSP_FIRSTFACTORIZE(S) ( SUN_CUSP_CONTENT(S)->first_factorize )
#define SUN_CUSP_LASTFLAG(S) ( SUN_CUSP_CONTENT(S)->last_flag )
#define SUN_CUSOL_HANDLE(S) ( SUN_CUSP_CONTENT(S)->cusolver_handle )
#define SUN_CUSP_DESC(S) ( SUN_CUSP_CONTENT(S)->desc )
#define SUN_CUSP_QRINFO(S) ( SUN_CUSP_CONTENT(S)->info )
#define SUN_CUSP_INTERNAL_SIZE(S) ( SUN_CUSP_CONTENT(S)->internal_size )
#define SUN_CUSP_WORK_SIZE(S) ( SUN_CUSP_CONTENT(S)->workspace_size )
/*
* ----------------------------------------------------------------------------
* Implementations of exported functions.
* ----------------------------------------------------------------------------
*/
SUNLinearSolver SUNLinSol_cuSolverSp_batchQR(N_Vector y, SUNMatrix A, cusolverSpHandle_t cusol_handle)
{
/* Check that required arguments are not NULL */
if (y == NULL || A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSol_cuSolverSp_batchQR: y or A is null\n");
return NULL;
}
/* Check compatibility with supplied SUNMatrix and N_Vector */
if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE || y->ops->nvgetdevicearraypointer == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSol_cuSolverSp_batchQR: illegal type for y or A\n");
return NULL;
}
/* Matrix and vector dimensions must agree */
if (N_VGetLength(y) != SUNMatrix_cuSparse_Columns(A))
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSol_cuSolverSp_batchQR: matrix and vector dimensions don't agree\n");
return NULL;
}
/* Create an empty linear solver */
SUNLinearSolver S;
S = NULL;
S = SUNLinSolNewEmpty();
if (S == NULL)
{
return NULL;
}
/* Attach operations */
S->ops->gettype = SUNLinSolGetType_cuSolverSp_batchQR;
S->ops->getid = SUNLinSolGetID_cuSolverSp_batchQR;
S->ops->initialize = SUNLinSolInitialize_cuSolverSp_batchQR;
S->ops->setup = SUNLinSolSetup_cuSolverSp_batchQR;
S->ops->solve = SUNLinSolSolve_cuSolverSp_batchQR;
S->ops->lastflag = SUNLinSolLastFlag_cuSolverSp_batchQR;
S->ops->free = SUNLinSolFree_cuSolverSp_batchQR;
/* Create content */
SUNLinearSolverContent_cuSolverSp_batchQR content;
content = NULL;
content = (SUNLinearSolverContent_cuSolverSp_batchQR) malloc(sizeof(*content));
if (content == NULL)
{
SUNLinSolFree(S);
return NULL;
}
/* Attach content */
S->content = content;
/* Fill content */
content->last_flag = SUNLS_SUCCESS;
content->first_factorize = SUNTRUE;
content->internal_size = 0;
content->workspace_size = 0;
content->cusolver_handle = cusol_handle;
content->info = NULL;
content->workspace = NULL;
content->desc = NULL;
return S;
}
/*
* -----------------------------------------------------------------
* Implementation of accessor and setter functions.
* -----------------------------------------------------------------
*/
void SUNLinSol_cuSolverSp_batchQR_GetDescription(SUNLinearSolver S, const char** desc)
{
*desc = SUN_CUSP_DESC(S);
}
void SUNLinSol_cuSolverSp_batchQR_SetDescription(SUNLinearSolver S, const char* desc)
{
SUN_CUSP_DESC(S) = desc;
}
void SUNLinSol_cuSolverSp_batchQR_GetDeviceSpace(SUNLinearSolver S,
size_t* cuSolverInternal,
size_t* cuSolverWorkspace)
{
/* size is in bytes */
*cuSolverInternal = SUN_CUSP_INTERNAL_SIZE(S); /* buffer for Q and R factors */
*cuSolverWorkspace = SUN_CUSP_WORK_SIZE(S); /* numerical factorization buffer */
}
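/*
 * A minimal usage sketch of this solver (hypothetical driver code, not part
 * of this file; error checking omitted):
 *
 *   cusolverSpHandle_t cusol;
 *   cusolverSpCreate(&cusol);
 *   SUNLinearSolver LS = SUNLinSol_cuSolverSp_batchQR(y, A, cusol);
 *   SUNLinSolInitialize(LS);          // flags the first factorization
 *   SUNLinSolSetup(LS, A);            // symbolic analysis + workspace sizing
 *   SUNLinSolSolve(LS, A, x, b, 0.0); // batched QR factorization and solve
 *   SUNLinSolFree(LS);
 *   cusolverSpDestroy(cusol);
 */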
/*
* -----------------------------------------------------------------
* Implementation of linear solver operations
* -----------------------------------------------------------------
*/
SUNLinearSolver_Type SUNLinSolGetType_cuSolverSp_batchQR(SUNLinearSolver S)
{
return(SUNLINEARSOLVER_DIRECT);
}
SUNLinearSolver_ID SUNLinSolGetID_cuSolverSp_batchQR(SUNLinearSolver S)
{
return(SUNLINEARSOLVER_CUSOLVERSP_BATCHQR);
}
int SUNLinSolInitialize_cuSolverSp_batchQR(SUNLinearSolver S)
{
SUN_CUSP_FIRSTFACTORIZE(S) = SUNTRUE;
SUN_CUSP_LASTFLAG(S) = SUNLS_SUCCESS;
return(SUN_CUSP_LASTFLAG(S));
}
int SUNLinSolSetup_cuSolverSp_batchQR(SUNLinearSolver S, SUNMatrix A)
{
int blockrows, blockcols, blocknnz, nblock;
int *d_rowptr, *d_colind;
realtype *d_data;
hipsparseMatDescr_t mat_descr;
hipError_t cuerr;
cusolverStatus_t status;
if (SUN_CUSP_LASTFLAG(S) != SUNLS_SUCCESS)
return SUN_CUSP_LASTFLAG(S);
if (SUN_CUSP_FIRSTFACTORIZE(S))
{
/* Free old workspace and symbolic analysis */
if (SUN_CUSP_QRWORKSPACE(S))
{
hipFree(SUN_CUSP_QRWORKSPACE(S));
cusolverSpDestroyCsrqrInfo(SUN_CUSP_QRINFO(S));
}
/* We must create a new csrqrinfo_t context every time we want to
do a symbolic analysis. Trying to reuse it results in a
CUSOLVER_STATUS_INVALID_VALUE error. */
status = cusolverSpCreateCsrqrInfo(&SUN_CUSP_QRINFO(S));
if (!SUNDIALS_CUSOLVER_VERIFY(status))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
nblock = SUNMatrix_cuSparse_NumBlocks(A);
blocknnz = SUNMatrix_cuSparse_BlockNNZ(A);
blockrows = SUNMatrix_cuSparse_BlockRows(A);
blockcols = SUNMatrix_cuSparse_BlockColumns(A);
d_data = SUNMatrix_cuSparse_Data(A);
d_rowptr = SUNMatrix_cuSparse_IndexPointers(A);
d_colind = SUNMatrix_cuSparse_IndexValues(A);
mat_descr = SUNMatrix_cuSparse_MatDescr(A);
/* Perform symbolic analysis of sparsity structure */
status = cusolverSpXcsrqrAnalysisBatched(SUN_CUSOL_HANDLE(S),
blockrows,
blockcols,
blocknnz,
mat_descr,
d_rowptr,
d_colind,
SUN_CUSP_QRINFO(S));
if (!SUNDIALS_CUSOLVER_VERIFY(status))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
/* Compute the workspace we will need */
status = _cusolverSpXcsrqrBufferInfoBatched(SUN_CUSOL_HANDLE(S),
blockrows,
blockcols,
blocknnz,
mat_descr,
d_data,
d_rowptr,
d_colind,
nblock,
SUN_CUSP_QRINFO(S),
&SUN_CUSP_INTERNAL_SIZE(S),
&SUN_CUSP_WORK_SIZE(S));
if (!SUNDIALS_CUSOLVER_VERIFY(status))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
cuerr = hipMalloc((void**) &SUN_CUSP_QRWORKSPACE(S), SUN_CUSP_WORK_SIZE(S));
if (!SUNDIALS_CUDA_VERIFY(cuerr))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
SUN_CUSP_FIRSTFACTORIZE(S) = SUNFALSE;
}
SUN_CUSP_LASTFLAG(S) = SUNLS_SUCCESS;
return(SUN_CUSP_LASTFLAG(S));
}
int SUNLinSolSolve_cuSolverSp_batchQR(SUNLinearSolver S, SUNMatrix A,
N_Vector x, N_Vector b, realtype tol)
{
cusolverStatus_t status;
int blockrows, blockcols, blocknnz, nblock;
int *d_rowptr, *d_colind;
realtype *d_data;
hipsparseMatDescr_t mat_descr;
if ((S == NULL) || (A == NULL) || (x == NULL) || (b == NULL))
return SUNLS_MEM_NULL;
SUN_CUSP_LASTFLAG(S) = SUNLS_SUCCESS;
realtype* device_b = N_VGetDeviceArrayPointer(b);
realtype* device_x = N_VGetDeviceArrayPointer(x);
if (SUN_CUSP_LASTFLAG(S) != SUNLS_SUCCESS)
return SUN_CUSP_LASTFLAG(S);
/* solve the system */
nblock = SUNMatrix_cuSparse_NumBlocks(A);
blocknnz = SUNMatrix_cuSparse_BlockNNZ(A);
blockrows = SUNMatrix_cuSparse_BlockRows(A);
blockcols = SUNMatrix_cuSparse_BlockColumns(A);
d_data = SUNMatrix_cuSparse_Data(A);
d_rowptr = SUNMatrix_cuSparse_IndexPointers(A);
d_colind = SUNMatrix_cuSparse_IndexValues(A);
mat_descr = SUNMatrix_cuSparse_MatDescr(A);
status = _cusolverSpXcsrqrsvBatched(SUN_CUSOL_HANDLE(S),
blockrows,
blockcols,
blocknnz,
mat_descr,
d_data,
d_rowptr,
d_colind,
device_b,
device_x,
nblock,
SUN_CUSP_QRINFO(S),
SUN_CUSP_QRWORKSPACE(S));
if (!SUNDIALS_CUSOLVER_VERIFY(status))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
return SUN_CUSP_LASTFLAG(S);
}
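/* Note: the batched routine above factorizes and solves nblock independent
   sparse systems in a single call; every block must share the sparsity
   pattern analyzed in Setup, and only the numerical values differ. */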
sunindextype SUNLinSolLastFlag_cuSolverSp_batchQR(SUNLinearSolver S)
{
if (S == NULL) return -1;
return SUN_CUSP_LASTFLAG(S);
}
int SUNLinSolFree_cuSolverSp_batchQR(SUNLinearSolver S)
{
/* return with success if already freed */
if (S == NULL) return SUNLS_SUCCESS;
/* free stuff in the content structure */
cusolverSpDestroyCsrqrInfo(SUN_CUSP_QRINFO(S));
hipFree(SUN_CUSP_QRWORKSPACE(S));
/* free content structure */
if (S->content) {
free(S->content);
S->content = NULL;
}
/* free ops structure */
if (S->ops) {
free(S->ops);
S->ops = NULL;
}
/* free the actual SUNLinSol */
free(S);
S = NULL;
return(SUNLS_SUCCESS);
}
| 137dd9a20da342c90ad7b39a0194e91e2f5780f5.cu | /* ----------------------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* ----------------------------------------------------------------------------
* Based on work by Donald Wilcox @ LBNL
* ----------------------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* ----------------------------------------------------------------------------
* Implementation file for cuSolverSp batched QR SUNLinearSolver interface.
* ----------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include <sunlinsol/sunlinsol_cusolversp_batchqr.h>
#include "sundials_cuda.h"
#include "sundials_debug.h"
#define ZERO RCONST(0.0)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
/* macros for handling the different function names based on precision */
#if defined(SUNDIALS_DOUBLE_PRECISION)
#define _cusolverSpXcsrqrBufferInfoBatched cusolverSpDcsrqrBufferInfoBatched
#define _cusolverSpXcsrqrsvBatched cusolverSpDcsrqrsvBatched
#elif defined(SUNDIALS_SINGLE_PRECISION)
#define _cusolverSpXcsrqrBufferInfoBatched cusolverSpScsrqrBufferInfoBatched
#define _cusolverSpXcsrqrsvBatched cusolverSpScsrqrsvBatched
#endif
/*
* -----------------------------------------------------------------
* cuSolverSp solver structure accessibility macros:
* -----------------------------------------------------------------
*/
#define SUN_CUSP_CONTENT(S) ( (SUNLinearSolverContent_cuSolverSp_batchQR)(S->content) )
#define SUN_CUSP_QRWORKSPACE(S) ( SUN_CUSP_CONTENT(S)->workspace )
#define SUN_CUSP_FIRSTFACTORIZE(S) ( SUN_CUSP_CONTENT(S)->first_factorize )
#define SUN_CUSP_LASTFLAG(S) ( SUN_CUSP_CONTENT(S)->last_flag )
#define SUN_CUSOL_HANDLE(S) ( SUN_CUSP_CONTENT(S)->cusolver_handle )
#define SUN_CUSP_DESC(S) ( SUN_CUSP_CONTENT(S)->desc )
#define SUN_CUSP_QRINFO(S) ( SUN_CUSP_CONTENT(S)->info )
#define SUN_CUSP_INTERNAL_SIZE(S) ( SUN_CUSP_CONTENT(S)->internal_size )
#define SUN_CUSP_WORK_SIZE(S) ( SUN_CUSP_CONTENT(S)->workspace_size )
/*
* ----------------------------------------------------------------------------
* Implementations of exported functions.
* ----------------------------------------------------------------------------
*/
SUNLinearSolver SUNLinSol_cuSolverSp_batchQR(N_Vector y, SUNMatrix A, cusolverSpHandle_t cusol_handle)
{
/* Check that required arguments are not NULL */
if (y == NULL || A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSol_cuSolverSp_batchQR: y or A is null\n");
return NULL;
}
/* Check compatibility with supplied SUNMatrix and N_Vector */
if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE || y->ops->nvgetdevicearraypointer == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSol_cuSolverSp_batchQR: illegal type for y or A\n");
return NULL;
}
/* Matrix and vector dimensions must agree */
if (N_VGetLength(y) != SUNMatrix_cuSparse_Columns(A))
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSol_cuSolverSp_batchQR: matrix and vector dimensions don't agree\n");
return NULL;
}
/* Create an empty linear solver */
SUNLinearSolver S;
S = NULL;
S = SUNLinSolNewEmpty();
if (S == NULL)
{
return NULL;
}
/* Attach operations */
S->ops->gettype = SUNLinSolGetType_cuSolverSp_batchQR;
S->ops->getid = SUNLinSolGetID_cuSolverSp_batchQR;
S->ops->initialize = SUNLinSolInitialize_cuSolverSp_batchQR;
S->ops->setup = SUNLinSolSetup_cuSolverSp_batchQR;
S->ops->solve = SUNLinSolSolve_cuSolverSp_batchQR;
S->ops->lastflag = SUNLinSolLastFlag_cuSolverSp_batchQR;
S->ops->free = SUNLinSolFree_cuSolverSp_batchQR;
/* Create content */
SUNLinearSolverContent_cuSolverSp_batchQR content;
content = NULL;
content = (SUNLinearSolverContent_cuSolverSp_batchQR) malloc(sizeof(*content));
if (content == NULL)
{
SUNLinSolFree(S);
return NULL;
}
/* Attach content */
S->content = content;
/* Fill content */
content->last_flag = SUNLS_SUCCESS;
content->first_factorize = SUNTRUE;
content->internal_size = 0;
content->workspace_size = 0;
content->cusolver_handle = cusol_handle;
content->info = NULL;
content->workspace = NULL;
content->desc = NULL;
return S;
}
/*
* -----------------------------------------------------------------
* Implementation of accessor and setter functions.
* -----------------------------------------------------------------
*/
void SUNLinSol_cuSolverSp_batchQR_GetDescription(SUNLinearSolver S, const char** desc)
{
*desc = SUN_CUSP_DESC(S);
}
void SUNLinSol_cuSolverSp_batchQR_SetDescription(SUNLinearSolver S, const char* desc)
{
SUN_CUSP_DESC(S) = desc;
}
void SUNLinSol_cuSolverSp_batchQR_GetDeviceSpace(SUNLinearSolver S,
size_t* cuSolverInternal,
size_t* cuSolverWorkspace)
{
/* size is in bytes */
*cuSolverInternal = SUN_CUSP_INTERNAL_SIZE(S); /* buffer for Q and R factors */
*cuSolverWorkspace = SUN_CUSP_WORK_SIZE(S); /* numerical factorization buffer */
}
/*
* -----------------------------------------------------------------
* Implementation of linear solver operations
* -----------------------------------------------------------------
*/
SUNLinearSolver_Type SUNLinSolGetType_cuSolverSp_batchQR(SUNLinearSolver S)
{
return(SUNLINEARSOLVER_DIRECT);
}
SUNLinearSolver_ID SUNLinSolGetID_cuSolverSp_batchQR(SUNLinearSolver S)
{
return(SUNLINEARSOLVER_CUSOLVERSP_BATCHQR);
}
int SUNLinSolInitialize_cuSolverSp_batchQR(SUNLinearSolver S)
{
SUN_CUSP_FIRSTFACTORIZE(S) = SUNTRUE;
SUN_CUSP_LASTFLAG(S) = SUNLS_SUCCESS;
return(SUN_CUSP_LASTFLAG(S));
}
int SUNLinSolSetup_cuSolverSp_batchQR(SUNLinearSolver S, SUNMatrix A)
{
int blockrows, blockcols, blocknnz, nblock;
int *d_rowptr, *d_colind;
realtype *d_data;
cusparseMatDescr_t mat_descr;
cudaError_t cuerr;
cusolverStatus_t status;
if (SUN_CUSP_LASTFLAG(S) != SUNLS_SUCCESS)
return SUN_CUSP_LASTFLAG(S);
if (SUN_CUSP_FIRSTFACTORIZE(S))
{
/* Free old workspace and symbolic analysis */
if (SUN_CUSP_QRWORKSPACE(S))
{
cudaFree(SUN_CUSP_QRWORKSPACE(S));
cusolverSpDestroyCsrqrInfo(SUN_CUSP_QRINFO(S));
}
/* We must create a new csrqrinfo_t context every time we want to
do a symbolic analysis. Trying to reuse it results in a
CUSOLVER_STATUS_INVALID_VALUE error. */
status = cusolverSpCreateCsrqrInfo(&SUN_CUSP_QRINFO(S));
if (!SUNDIALS_CUSOLVER_VERIFY(status))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
nblock = SUNMatrix_cuSparse_NumBlocks(A);
blocknnz = SUNMatrix_cuSparse_BlockNNZ(A);
blockrows = SUNMatrix_cuSparse_BlockRows(A);
blockcols = SUNMatrix_cuSparse_BlockColumns(A);
d_data = SUNMatrix_cuSparse_Data(A);
d_rowptr = SUNMatrix_cuSparse_IndexPointers(A);
d_colind = SUNMatrix_cuSparse_IndexValues(A);
mat_descr = SUNMatrix_cuSparse_MatDescr(A);
/* Perform symbolic analysis of sparsity structure */
status = cusolverSpXcsrqrAnalysisBatched(SUN_CUSOL_HANDLE(S),
blockrows,
blockcols,
blocknnz,
mat_descr,
d_rowptr,
d_colind,
SUN_CUSP_QRINFO(S));
if (!SUNDIALS_CUSOLVER_VERIFY(status))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
/* Compute the workspace we will need */
status = _cusolverSpXcsrqrBufferInfoBatched(SUN_CUSOL_HANDLE(S),
blockrows,
blockcols,
blocknnz,
mat_descr,
d_data,
d_rowptr,
d_colind,
nblock,
SUN_CUSP_QRINFO(S),
&SUN_CUSP_INTERNAL_SIZE(S),
&SUN_CUSP_WORK_SIZE(S));
if (!SUNDIALS_CUSOLVER_VERIFY(status))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
cuerr = cudaMalloc((void**) &SUN_CUSP_QRWORKSPACE(S), SUN_CUSP_WORK_SIZE(S));
if (!SUNDIALS_CUDA_VERIFY(cuerr))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
SUN_CUSP_FIRSTFACTORIZE(S) = SUNFALSE;
}
SUN_CUSP_LASTFLAG(S) = SUNLS_SUCCESS;
return(SUN_CUSP_LASTFLAG(S));
}
int SUNLinSolSolve_cuSolverSp_batchQR(SUNLinearSolver S, SUNMatrix A,
N_Vector x, N_Vector b, realtype tol)
{
cusolverStatus_t status;
int blockrows, blockcols, blocknnz, nblock;
int *d_rowptr, *d_colind;
realtype *d_data;
cusparseMatDescr_t mat_descr;
if ((S == NULL) || (A == NULL) || (x == NULL) || (b == NULL))
return SUNLS_MEM_NULL;
SUN_CUSP_LASTFLAG(S) = SUNLS_SUCCESS;
realtype* device_b = N_VGetDeviceArrayPointer(b);
realtype* device_x = N_VGetDeviceArrayPointer(x);
if (SUN_CUSP_LASTFLAG(S) != SUNLS_SUCCESS)
return SUN_CUSP_LASTFLAG(S);
/* solve the system */
nblock = SUNMatrix_cuSparse_NumBlocks(A);
blocknnz = SUNMatrix_cuSparse_BlockNNZ(A);
blockrows = SUNMatrix_cuSparse_BlockRows(A);
blockcols = SUNMatrix_cuSparse_BlockColumns(A);
d_data = SUNMatrix_cuSparse_Data(A);
d_rowptr = SUNMatrix_cuSparse_IndexPointers(A);
d_colind = SUNMatrix_cuSparse_IndexValues(A);
mat_descr = SUNMatrix_cuSparse_MatDescr(A);
status = _cusolverSpXcsrqrsvBatched(SUN_CUSOL_HANDLE(S),
blockrows,
blockcols,
blocknnz,
mat_descr,
d_data,
d_rowptr,
d_colind,
device_b,
device_x,
nblock,
SUN_CUSP_QRINFO(S),
SUN_CUSP_QRWORKSPACE(S));
if (!SUNDIALS_CUSOLVER_VERIFY(status))
{
SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC;
return SUN_CUSP_LASTFLAG(S);
}
return SUN_CUSP_LASTFLAG(S);
}
sunindextype SUNLinSolLastFlag_cuSolverSp_batchQR(SUNLinearSolver S)
{
if (S == NULL) return -1;
return SUN_CUSP_LASTFLAG(S);
}
int SUNLinSolFree_cuSolverSp_batchQR(SUNLinearSolver S)
{
/* return with success if already freed */
if (S == NULL) return SUNLS_SUCCESS;
/* free stuff in the content structure */
cusolverSpDestroyCsrqrInfo(SUN_CUSP_QRINFO(S));
cudaFree(SUN_CUSP_QRWORKSPACE(S));
/* free content structure */
if (S->content) {
free(S->content);
S->content = NULL;
}
/* free ops structure */
if (S->ops) {
free(S->ops);
S->ops = NULL;
}
/* free the actual SUNLinSol */
free(S);
S = NULL;
return(SUNLS_SUCCESS);
}
|
1901d79a024d88e43d0a9214ffb6e176ae7d174f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/adam_op.h"
namespace caffe2 {
__global__ void AdamUpdate(
int N,
const float* g,
const float* m,
const float* v,
float* ng,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i];
float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
ng[i] = lr[0] * correction * mi / (sqrtf(vi) + eps_hat);
}
}
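// The kernel above implements the standard Adam update (Kingma & Ba, 2014):
//   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
//   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2
//   step_t = lr * correction * m_t / (sqrt(v_t) + eps_hat)
// where the caller supplies correction = sqrt(1 - beta2^t) / (1 - beta1^t),
// folding both bias-correction terms into one scalar.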
template <>
void adam_update<CUDAContext>(
int N,
const float* g,
const float* m,
const float* v,
float* ng,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr,
CUDAContext* context) {
hipLaunchKernelGGL(( AdamUpdate),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
N, g, m, v, ng, nm, nv, beta1, beta2, eps_hat, correction, lr);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
__global__ void AdamCompute(
int N,
const float* w,
const float* g,
const float* m,
const float* v,
float* nw,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i];
float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
float ng = lr[0] * correction * mi / (sqrtf(vi) + eps_hat);
nw[i] = w[i] + ng;
}
}
template <>
void adam_compute<CUDAContext>(
int N,
const float* w,
const float* g,
const float* m,
const float* v,
float* nw,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr,
CUDAContext* context) {
hipLaunchKernelGGL(( AdamCompute),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
N, w, g, m, v, nw, nm, nv, beta1, beta2, eps_hat, correction, lr);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
__global__ void AdamComputeOutputGrad(
int N,
const float* w,
const float* g,
const float* m,
const float* v,
float* nw,
float* nm,
float* nv,
float* ng,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i];
float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
float ngi = ng[i] = correction * mi / (sqrtf(vi) + eps_hat);
nw[i] = w[i] + lr[0] * ngi;
}
}
template <>
void adam_compute_output_grad<CUDAContext>(
int N,
const float* w,
const float* g,
const float* m,
const float* v,
float* nw,
float* nm,
float* nv,
float* ng,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr,
CUDAContext* context) {
hipLaunchKernelGGL(( AdamComputeOutputGrad),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
N, w, g, m, v, nw, nm, nv, ng, beta1, beta2, eps_hat, correction, lr);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename SIndex>
__global__ void SparseAdamKernel(
const size_t N,
const size_t grad_slice_sz,
const float beta1,
const float beta2,
const float epsilon,
float* param,
float* mom1,
float* mom2,
const SIndex* indices,
const float* grad,
const float correction,
const float* lr,
const float iter) {
CUDA_1D_KERNEL_LOOP(i, N) {
const size_t gradIdx = i;
const SIndex index = indices[i / grad_slice_sz];
const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz);
float m1n = mom1[paramIdx] =
mom1[paramIdx] * beta1 + grad[gradIdx] * (1.0f - beta1);
float m2n = mom2[paramIdx] =
mom2[paramIdx] * beta2 + grad[gradIdx] * grad[gradIdx] * (1.0f - beta2);
param[paramIdx] += lr[0] * correction * m1n / (sqrt(m2n) + epsilon);
}
}
template <typename SIndex>
__global__ void SparseAdamOutputGradKernel(
const size_t N,
const size_t grad_slice_sz,
const float beta1,
const float beta2,
const float epsilon,
float* param,
float* mom1,
float* mom2,
float* output_grad,
const SIndex* indices,
const float* grad,
const float correction,
const float* lr,
const float iter) {
CUDA_1D_KERNEL_LOOP(i, N) {
const size_t gradIdx = i;
const SIndex index = indices[i / grad_slice_sz];
const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz);
float m1n = mom1[paramIdx] =
mom1[paramIdx] * beta1 + grad[gradIdx] * (1.0f - beta1);
float m2n = mom2[paramIdx] =
mom2[paramIdx] * beta2 + grad[gradIdx] * grad[gradIdx] * (1.0f - beta2);
float gradOut = output_grad[gradIdx] =
correction * m1n / (sqrt(m2n) + epsilon);
param[paramIdx] += lr[0] * gradOut;
}
}
template <typename SIndex>
__global__ void RowWiseSparseAdamKernel(
const int M,
const int N,
const float beta1,
const float beta2,
const float epsilon,
float* param,
float* mom1,
float* mom2,
const SIndex* indices,
const float* grad,
const float correction,
const float* lr) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
int valid = min(N, CAFFE_CUDA_NUM_THREADS);
// in case gridDim is smaller than M
for (int i = blockIdx.x; i < M; i += gridDim.x) {
const SIndex index = indices[i];
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
// in case N is bigger than block size which is 512 by default
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const float x_ij = grad[i * N + j];
sum_squares += x_ij * x_ij;
}
float reduce_sum_squares =
BlockReduce(temp_storage).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_sum_squares / (float)N;
mom2[index] = mom2[index] * beta2 + row_sum_squares_avg * (1.0f - beta2);
}
__syncthreads();
// update param
float step = correction / (std::sqrt(mom2[index]) + epsilon);
for (int j = threadIdx.x; j < N; j += blockDim.x) {
mom1[index * N + j] =
mom1[index * N + j] * beta1 + grad[i * N + j] * (1.0f - beta1);
param[index * N + j] += lr[0] * mom1[index * N + j] * step;
}
}
}
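// Row-wise variant: instead of one second moment per parameter, mom2 holds a
// single scalar per embedding row, driven by the row's mean squared gradient:
//   v_r = beta2 * v_r + (1 - beta2) * (1/N) * sum_j g_{r,j}^2
// which shrinks the mom2 optimizer state by a factor of N per row.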
template <typename SIndex>
__global__ void RowWiseSparseAdamOutputGradKernel(
const int M,
const int N,
const float beta1,
const float beta2,
const float epsilon,
float* param,
float* mom1,
float* mom2,
float* output_grad,
const SIndex* indices,
const float* grad,
const float correction,
const float* lr) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
int valid = min(N, CAFFE_CUDA_NUM_THREADS);
// in case gridDim is smaller than M
for (int i = blockIdx.x; i < M; i += gridDim.x) {
const SIndex index = indices[i];
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
// in case N is bigger than block size which is 512 by default
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const float x_ij = grad[i * N + j];
sum_squares += x_ij * x_ij;
}
float reduce_sum_squares =
BlockReduce(temp_storage).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_sum_squares / (float)N;
mom2[index] = mom2[index] * beta2 + row_sum_squares_avg * (1.0f - beta2);
}
__syncthreads();
// update param
float step = correction / (std::sqrt(mom2[index]) + epsilon);
for (int j = threadIdx.x; j < N; j += blockDim.x) {
mom1[index * N + j] =
mom1[index * N + j] * beta1 + grad[i * N + j] * (1.0f - beta1);
output_grad[i * N + j] = mom1[index * N + j] * step;
param[index * N + j] += lr[0] * output_grad[i * N + j];
}
}
}
template <>
template <typename SIndex>
bool SparseAdamOp<float, CUDAContext>::DoRunWithType() {
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
Output(OUTPUT_MOMENT_2)->ResizeLike(Input(MOMENT_2));
auto N = Input(GRAD).size();
auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim());
const auto iter =
OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0];
const float correction = sqrtf(1.0f - ::pow(beta2_, iter + 1)) /
(1.0f - ::pow(beta1_, iter + 1));
if (OutputSize() == 3) {
hipLaunchKernelGGL(( SparseAdamKernel<SIndex>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
grad_slice_sz,
beta1_,
beta2_,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_2)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
correction,
Input(LR).template data<float>(),
iter);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
hipLaunchKernelGGL(( SparseAdamOutputGradKernel<SIndex>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
grad_slice_sz,
beta1_,
beta2_,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_2)->template mutable_data<float>(),
Output(OUTPUT_GRAD)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
correction,
Input(LR).template data<float>(),
iter);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
return true;
}
template <>
template <typename SIndex>
bool RowWiseSparseAdamOp<float, CUDAContext>::DoRunWithType() {
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
Output(OUTPUT_MOMENT_2)->ResizeLike(Input(MOMENT_2));
auto N = Input(GRAD).size();
if (N == 0) {
// empty grad, nothing to do here, not even launching the kernel
return true;
}
const auto iter =
OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0];
const float correction = sqrtf(1.0f - ::pow(beta2_, iter + 1)) /
(1.0f - ::pow(beta1_, iter + 1));
// size of the 1st dimension of the input gradient
auto GRAD_M = Input(GRAD).dim32(0);
auto GRAD_N = N / GRAD_M;
if (OutputSize() == 3) {
hipLaunchKernelGGL(( RowWiseSparseAdamKernel<SIndex>)
, dim3(::min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
GRAD_M,
GRAD_N,
beta1_,
beta2_,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_2)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
correction,
Input(LR).template data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
hipLaunchKernelGGL(( RowWiseSparseAdamOutputGradKernel<SIndex>)
, dim3(::min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
GRAD_M,
GRAD_N,
beta1_,
beta2_,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_2)->template mutable_data<float>(),
Output(OUTPUT_GRAD)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
correction,
Input(LR).template data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_CUDA_OPERATOR(Adam, AdamOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SparseAdam, SparseAdamOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdam,
RowWiseSparseAdamOp<float, CUDAContext>);
} // namespace caffe2
| 1901d79a024d88e43d0a9214ffb6e176ae7d174f.cu | #include <cub/block/block_reduce.cuh>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/adam_op.h"
namespace caffe2 {
__global__ void AdamUpdate(
int N,
const float* g,
const float* m,
const float* v,
float* ng,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i];
float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
ng[i] = lr[0] * correction * mi / (sqrtf(vi) + eps_hat);
}
}
template <>
void adam_update<CUDAContext>(
int N,
const float* g,
const float* m,
const float* v,
float* ng,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr,
CUDAContext* context) {
AdamUpdate<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
N, g, m, v, ng, nm, nv, beta1, beta2, eps_hat, correction, lr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
__global__ void AdamCompute(
int N,
const float* w,
const float* g,
const float* m,
const float* v,
float* nw,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i];
float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
float ng = lr[0] * correction * mi / (sqrtf(vi) + eps_hat);
nw[i] = w[i] + ng;
}
}
template <>
void adam_compute<CUDAContext>(
int N,
const float* w,
const float* g,
const float* m,
const float* v,
float* nw,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr,
CUDAContext* context) {
AdamCompute<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
N, w, g, m, v, nw, nm, nv, beta1, beta2, eps_hat, correction, lr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
__global__ void AdamComputeOutputGrad(
int N,
const float* w,
const float* g,
const float* m,
const float* v,
float* nw,
float* nm,
float* nv,
float* ng,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i];
float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
float ngi = ng[i] = correction * mi / (sqrtf(vi) + eps_hat);
nw[i] = w[i] + lr[0] * ngi;
}
}
template <>
void adam_compute_output_grad<CUDAContext>(
int N,
const float* w,
const float* g,
const float* m,
const float* v,
float* nw,
float* nm,
float* nv,
float* ng,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr,
CUDAContext* context) {
AdamComputeOutputGrad<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
N, w, g, m, v, nw, nm, nv, ng, beta1, beta2, eps_hat, correction, lr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename SIndex>
__global__ void SparseAdamKernel(
const size_t N,
const size_t grad_slice_sz,
const float beta1,
const float beta2,
const float epsilon,
float* param,
float* mom1,
float* mom2,
const SIndex* indices,
const float* grad,
const float correction,
const float* lr,
const float iter) {
CUDA_1D_KERNEL_LOOP(i, N) {
const size_t gradIdx = i;
const SIndex index = indices[i / grad_slice_sz];
const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz);
float m1n = mom1[paramIdx] =
mom1[paramIdx] * beta1 + grad[gradIdx] * (1.0f - beta1);
float m2n = mom2[paramIdx] =
mom2[paramIdx] * beta2 + grad[gradIdx] * grad[gradIdx] * (1.0f - beta2);
param[paramIdx] += lr[0] * correction * m1n / (sqrt(m2n) + epsilon);
}
}
template <typename SIndex>
__global__ void SparseAdamOutputGradKernel(
const size_t N,
const size_t grad_slice_sz,
const float beta1,
const float beta2,
const float epsilon,
float* param,
float* mom1,
float* mom2,
float* output_grad,
const SIndex* indices,
const float* grad,
const float correction,
const float* lr,
const float iter) {
CUDA_1D_KERNEL_LOOP(i, N) {
const size_t gradIdx = i;
const SIndex index = indices[i / grad_slice_sz];
const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz);
float m1n = mom1[paramIdx] =
mom1[paramIdx] * beta1 + grad[gradIdx] * (1.0f - beta1);
float m2n = mom2[paramIdx] =
mom2[paramIdx] * beta2 + grad[gradIdx] * grad[gradIdx] * (1.0f - beta2);
float gradOut = output_grad[gradIdx] =
correction * m1n / (sqrt(m2n) + epsilon);
param[paramIdx] += lr[0] * gradOut;
}
}
template <typename SIndex>
__global__ void RowWiseSparseAdamKernel(
const int M,
const int N,
const float beta1,
const float beta2,
const float epsilon,
float* param,
float* mom1,
float* mom2,
const SIndex* indices,
const float* grad,
const float correction,
const float* lr) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
int valid = min(N, CAFFE_CUDA_NUM_THREADS);
// in case gridDim is smaller than M
for (int i = blockIdx.x; i < M; i += gridDim.x) {
const SIndex index = indices[i];
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
// in case N is bigger than block size which is 512 by default
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const float x_ij = grad[i * N + j];
sum_squares += x_ij * x_ij;
}
float reduce_sum_squares =
BlockReduce(temp_storage).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_sum_squares / (float)N;
mom2[index] = mom2[index] * beta2 + row_sum_squares_avg * (1.0f - beta2);
}
__syncthreads();
// update param
float step = correction / (std::sqrt(mom2[index]) + epsilon);
for (int j = threadIdx.x; j < N; j += blockDim.x) {
mom1[index * N + j] =
mom1[index * N + j] * beta1 + grad[i * N + j] * (1.0f - beta1);
param[index * N + j] += lr[0] * mom1[index * N + j] * step;
}
}
}
template <typename SIndex>
__global__ void RowWiseSparseAdamOutputGradKernel(
const int M,
const int N,
const float beta1,
const float beta2,
const float epsilon,
float* param,
float* mom1,
float* mom2,
float* output_grad,
const SIndex* indices,
const float* grad,
const float correction,
const float* lr) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
int valid = min(N, CAFFE_CUDA_NUM_THREADS);
// in case gridDim is smaller than M
for (int i = blockIdx.x; i < M; i += gridDim.x) {
const SIndex index = indices[i];
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
// in case N is bigger than block size which is 512 by default
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const float x_ij = grad[i * N + j];
sum_squares += x_ij * x_ij;
}
float reduce_sum_squares =
BlockReduce(temp_storage).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_sum_squares / (float)N;
mom2[index] = mom2[index] * beta2 + row_sum_squares_avg * (1.0f - beta2);
}
__syncthreads();
// update param
float step = correction / (std::sqrt(mom2[index]) + epsilon);
for (int j = threadIdx.x; j < N; j += blockDim.x) {
mom1[index * N + j] =
mom1[index * N + j] * beta1 + grad[i * N + j] * (1.0f - beta1);
output_grad[i * N + j] = mom1[index * N + j] * step;
param[index * N + j] += lr[0] * output_grad[i * N + j];
}
}
}
template <>
template <typename SIndex>
bool SparseAdamOp<float, CUDAContext>::DoRunWithType() {
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
Output(OUTPUT_MOMENT_2)->ResizeLike(Input(MOMENT_2));
auto N = Input(GRAD).size();
auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim());
const auto iter =
OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0];
const float correction = sqrtf(1.0f - std::pow(beta2_, iter + 1)) /
(1.0f - std::pow(beta1_, iter + 1));
if (OutputSize() == 3) {
SparseAdamKernel<SIndex>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
grad_slice_sz,
beta1_,
beta2_,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_2)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
correction,
Input(LR).template data<float>(),
iter);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
SparseAdamOutputGradKernel<SIndex>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
grad_slice_sz,
beta1_,
beta2_,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_2)->template mutable_data<float>(),
Output(OUTPUT_GRAD)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
correction,
Input(LR).template data<float>(),
iter);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
return true;
}
template <>
template <typename SIndex>
bool RowWiseSparseAdamOp<float, CUDAContext>::DoRunWithType() {
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
Output(OUTPUT_MOMENT_2)->ResizeLike(Input(MOMENT_2));
auto N = Input(GRAD).size();
if (N == 0) {
// empty grad, nothing to do here, not even launching the kernel
return true;
}
const auto iter =
OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0];
const float correction = sqrtf(1.0f - std::pow(beta2_, iter + 1)) /
(1.0f - std::pow(beta1_, iter + 1));
// size of the 1st dimension of the input gradient
auto GRAD_M = Input(GRAD).dim32(0);
auto GRAD_N = N / GRAD_M;
if (OutputSize() == 3) {
RowWiseSparseAdamKernel<SIndex>
<<<std::min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
GRAD_M,
GRAD_N,
beta1_,
beta2_,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_2)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
correction,
Input(LR).template data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
RowWiseSparseAdamOutputGradKernel<SIndex>
<<<std::min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
GRAD_M,
GRAD_N,
beta1_,
beta2_,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_2)->template mutable_data<float>(),
Output(OUTPUT_GRAD)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
correction,
Input(LR).template data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_CUDA_OPERATOR(Adam, AdamOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SparseAdam, SparseAdamOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdam,
RowWiseSparseAdamOp<float, CUDAContext>);
} // namespace caffe2
|
15cadc9949e9133abaf249e4f7b22bcd57d5f98e.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Modifications Copyright 2017-2018 H2O.ai, Inc.
*/
#include "solver/glm.h"
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <algorithm>
#include <limits>
#include <deque>
#include <numeric>
#include "cml/cml_blas.cuh"
#include "cml/cml_vector.cuh"
#include "interface_defs.h"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "matrix/matrix_sparse.h"
#include "projector/projector.h"
#include "projector/projector_direct.h"
#include "projector/projector_cgls.h"
#include "util.h"
#include "cuda_utils.h"
#include "timer.h"
//#include "kmeans.h"
typedef struct {
double* sendBuff;
double* recvBuff;
int size;
hipStream_t stream;
} PerThreadData;
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpu {
namespace {
template<typename T, typename Op>
struct ApplyOp: thrust::binary_function<FunctionObj<T>, FunctionObj<T>, T> {
Op binary_op;
ApplyOp(Op binary_op) :
binary_op(binary_op) {
}
__host__ __device__ FunctionObj<T> operator()(FunctionObj<T> &h, T x) {
h.a = binary_op(h.a, x);
h.d = binary_op(h.d, x);
h.e = binary_op(binary_op(h.e, x), x);
return h;
}
};
} // namespace
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(int sharedA, int me, int wDev, const M &A) :
_A(sharedA, me, wDev, A), _P(wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(wDev)
#ifdef USE_NCCL2
,_comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(const M &A) :
_A(A._sharedA, A._me, A._wDev, A), _P(_A._wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(_A._wDev)
#ifdef USE_NCCL2
,comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(hipSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(hipGetDevice(&devID));
hipDeviceProp_t props;
// get device properties
CUDACHECK(hipGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // force to chosen device
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, i));
CUDACHECK(hipSetDevice(i));
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
ncclComm_t* _comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev);
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
hipDeviceProp_t prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
hipMalloc(&_z, (m + n) * sizeof(T));
hipMemset(_z, 0, (m + n) * sizeof(T));
hipMalloc(&_zt, (m + n) * sizeof(T));
hipMemset(_zt, 0, (m + n) * sizeof(T));
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
hipMalloc(&_xp, (n) * sizeof(T));
hipMalloc(&_trainPredsp, (m) * sizeof(T));
hipMalloc(&_validPredsp, (mvalid) * sizeof(T));
hipMemset(_xp, 0, (n) * sizeof(T));
hipMemset(_trainPredsp, 0, (m) * sizeof(T));
hipMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
PUSH_RANGE("Eq",Eq,1);
_A.Equil(_equil);
POP_RANGE("Eq",Eq,1);
// PUSH_RANGE("Init1",Init1,1);
_P.Init();
CUDA_CHECK_ERR();
// POP_RANGE("Init1",Init1,1);
#ifdef DEBUG
printf("Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
H2O4GPUStatus H2O4GPU<T, M, P>::Solve(const std::vector<FunctionObj<T> > &f,
const std::vector<FunctionObj<T> > &g) {
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(hipSetDevice(_wDev));
double t0 = timer<double>();
// TODO: Constants are set arbitrarily based upon limited experiments in academic papers
// Constants for adaptive-rho and over-relaxation.
const T kDeltaMin = static_cast<T>(1.05); // for adaptive rho and rescaling
const T kGamma = static_cast<T>(1.01); // for adaptive rho and rescaling
const T kTau = static_cast<T>(0.8); // for adaptive rho and rescaling
const T kAlpha = static_cast<T>(1.7); // set to 1.0 to disable over-relaxation technique, normally 1.5-1.8 and was set to 1.7
const T kKappa = static_cast<T>(0.9); // for adaptive rho and rescaling
const T kOne = static_cast<T>(1.0); // definition
const T kZero = static_cast<T>(0.0); // definition
const T kProjTolMax = static_cast<T>(1e-6); // tightest projection tolerance (used as the floor of the schedule below)
const T kProjTolMin = static_cast<T>(1e-2); // loosest (initial) projection tolerance
const T kProjTolPow = static_cast<T>(1.3); // decay power of the projection-tolerance schedule
const T kProjTolIni = static_cast<T>(1e-5); // projection tolerance for the initial-guess iterations
const bool use_exact_stop = true; // false gives worse train error and runs to the maximum number of iterations with simple.R
// fprintf(stderr,"solve _data=%p\n",_A._data); fflush(stderr);
// fprintf(stderr,"solve _datay=%p\n",_A._datay); fflush(stderr);
// Notes on variable names:
//
// Original Boyd ADMM paper solves:
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf
// Minimize: f(x) + g(z)
// Subject to: Ax + Bz = c
// Primary variable: x
// Dual variable: z
// Step size: \rho
// Where for Lasso: f(x) = (1/2)||x-b||_2^2 and g(z) = \lambda||z||_1 with constraint x=Az
//
// H2O4GPU paper and code:
// http://foges.github.io/h2o4gpu/ and http://stanford.edu/~boyd/papers/h2o4gpu.html
// Minimize: f(y) + g(x) for a variety (but limited set) of f and g shown in src/include/prox_lib.h
// Subject to: y = Ax (always)
// Where for Lasso: f(y) = (1/2)||y-b||_2^2 and g(x) = \lambda||x||_1 and constraint is y=Ax
// Primary variable: y
// Dual variable: x
// Step size or Proximal parameter: \rho
// Intermediate variable: z
// Internally h2o4gpu code uses \mu and \nu scaled variables, performs pre-conditioning using e and d.
// \lambda_{max} = ||A^T b|| makes sense if have (1/2) in front of f(y) for Lasso
//
// H2O4GPU overall steps:
// 1) Precondition A using d and e and renormalize variables and all equations using d and e
// 2) Compute Gramian: A^T A only once
// 3) Cholesky of gram: Only compute cholesky once -- s and info->s in Project just kOne=1 and just ensure GPU has cholesky already. Could have put into Init with Gramian)
// 4) Project: Solve L L^T x = b for x by forward and backward solve (Ly=b for y and then y=L^T x for x)
// 5) Repeat #4, until convergence from primary (min Ax-b) and dual (min f(y)+g(x)) residuals
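// Illustrative sketch (not used by the solver): for the Lasso instance above,
// the two proximal operators evaluated elementwise each iteration have the
// closed forms
//   prox_{f,rho}(v) = (rho*v + b) / (rho + 1)                  // f(y) = (1/2)||y - b||_2^2
//   prox_{g,rho}(v) = sign(v) * max(|v| - lambda/rho, 0)       // g(x) = lambda*||x||_1 (soft-thresholding)
// The general per-function forms live in src/include/prox_lib.h.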
// Extract values from h2o4gpu_data
PUSH_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
thrust::device_vector<FunctionObj<T> > f_gpu = f;
thrust::device_vector<FunctionObj<T> > g_gpu = g;
// TODO: Need to give scale to these
// const T kRhoMin = static_cast<T>(1e-4); // lower range for adaptive rho
// const T kRhoMax = static_cast<T>(1e4); // upper range for adaptive rho
const T kRhoMin = static_cast<T>(std::numeric_limits<T>::epsilon()); // lower range for adaptive rho
const T kRhoMax = static_cast<T>(1.0 / kRhoMin); // upper range for adaptive rho
POP_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
PUSH_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
// Create cuBLAS handle.
hipblasHandle_t hdl;
hipblasCreate(&hdl);
CUDA_CHECK_ERR();
// Allocate data for ADMM variables.
cml::vector<T> de = cml::vector_view_array(_A._de, m + n);
cml::vector<T> z = cml::vector_view_array(_z, m + n);
cml::vector<T> zt = cml::vector_view_array(_zt, m + n);
cml::vector<T> zprev = cml::vector_calloc<T>(m + n);
cml::vector<T> ztemp = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
CUDA_CHECK_ERR();
// Create views for x and y components (same memory space used, not value copy)
cml::vector<T> d = cml::vector_subvector(&de, 0, m);
cml::vector<T> e = cml::vector_subvector(&de, m, n);
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
cml::vector<T> xprev = cml::vector_subvector(&zprev, 0, n);
cml::vector<T> yprev = cml::vector_subvector(&zprev, n, m);
cml::vector<T> xtemp = cml::vector_subvector(&ztemp, 0, n);
cml::vector<T> ytemp = cml::vector_subvector(&ztemp, n, m);
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
PUSH_RANGE("H2O4GPUScale",H2O4GPUScale,5);
// Scale f and g to account for diagonal scaling e and d.
// f/d -> f
thrust::transform(f_gpu.begin(), f_gpu.end(),
thrust::device_pointer_cast(d.data), f_gpu.begin(),
ApplyOp<T, thrust::divides<T> >(thrust::divides<T>()));
// g*e -> g
thrust::transform(g_gpu.begin(), g_gpu.end(),
thrust::device_pointer_cast(e.data), g_gpu.begin(),
ApplyOp<T, thrust::multiplies<T> >(thrust::multiplies<T>()));
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUScale",H2O4GPUScale,5);
PUSH_RANGE("Lambda",Lambda,6);
// Initialize (x, lambda) from (x0, lambda0).
if (_init_x) {
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
_A.Mul('n', kOne, xtemp.data, kZero, ytemp.data); // kOne*A*x + kZero*y -> y
wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
cml::vector_memcpy(&z, &ztemp); // ztemp->z (xtemp and ytemp are views of ztemp)
CUDA_CHECK_ERR();
}
if (_init_lambda) {
cml::vector_memcpy(&ytemp, _lambda); // _lambda->ytemp
cml::vector_div(&ytemp, &d); // ytemp/d -> ytemp
_A.Mul('t', -kOne, ytemp.data, kZero, xtemp.data); // -kOne*y+kZero*x -> x
wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &ztemp); // ztemp = ztemp * (-kOne/_rho)
else
cml::blas_scal(hdl, kZero, &ztemp); // _rho == 0 guard: ztemp = 0
cml::vector_memcpy(&zt, &ztemp); // ztemp->zt
CUDA_CHECK_ERR();
} POP_RANGE("Lambda",Lambda,6);
PUSH_RANGE("Guess",Guess,7);
// Make an initial guess for (x0 or lambda0).
if (_init_x && !_init_lambda) {
// Alternating projections to satisfy
// 1. \lambda \in \partial f(y), \mu \in \partial g(x)
// 2. \mu = -A^T\lambda
cml::vector_set_all(&zprev, kZero); // zprev = kZero
for (unsigned int i = 0; i < kInitIter; ++i) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"GStep%d",i);
PUSH_RANGE(mystring,GStep,1);
#endif
ProjSubgradEval(g_gpu, xprev.data, x.data, xtemp.data);
ProjSubgradEval(f_gpu, yprev.data, y.data, ytemp.data);
_P.Project(xtemp.data, ytemp.data, kOne, xprev.data, yprev.data,
kProjTolIni);
wrapcudaDeviceSynchronize(); // not needed, as blas's are cuda call and will follow sequentially on device
CUDA_CHECK_ERR();
cml::blas_axpy(hdl, -kOne, &ztemp, &zprev); // alpha*X + Y -> Y
cml::blas_scal(hdl, -kOne, &zprev);
#ifdef USE_NVTX
POP_RANGE(mystring,GStep,1);
#endif
}
// xt = -1 / \rho * \mu, yt = -1 / \rho * \lambda.
cml::vector_memcpy(&zt, &zprev); // zprev->zt
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &zt);
else
cml::blas_scal(hdl, kZero, &zt);
} else if (_init_lambda && !_init_x) {
ASSERT(false);
}
_init_x = _init_lambda = false;
POP_RANGE("Guess",Guess,7);
// Save initialization time.
double time_init = timer<double>() - t0;
#ifdef DEBUG
printf("Time to initialize: %f\n", time_init);
#endif
// Signal start of execution.
if (_verbose > 0) {
#pragma omp critical
{
printMe(std::cout, f[1].a, f[1].b, f[1].c, f[1].d, f[1].e, g[1].a,
g[1].b, g[1].c, g[1].d, g[1].e); //debugging only: print the second since the first can be for intercept (which is then 0)
//printData(std::cout); //only works for data in host memory!
}
}
if (_verbose > 1) {
Printf(
__HBAR__
" Iter | pri res | pri tol | dua res | dua tol | gap | eps gap |"
" pri obj\n" __HBAR__);
}
// Initialize scalars.
T sqrtn_atol = std::sqrt(static_cast<T>(n)) * _abs_tol;
T sqrtm_atol = std::sqrt(static_cast<T>(m)) * _abs_tol;
T sqrtmn_atol = std::sqrt(static_cast<T>(m + n)) * _abs_tol;
T delta = kDeltaMin, xi = static_cast<T>(1.0);
unsigned int k = 0u, kd = 0u, ku = 0u;
bool converged = false;
T nrm_r, nrm_s, gap, eps_gap, eps_pri, eps_dua;
// Stop early setup
unsigned int QUEUELENGTH = 10;
std::deque<T> nrm_r_deque;
std::deque<T> nrm_s_deque;
std::deque<T> nrm_r_avg;
std::deque<T> nrm_s_avg;
std::deque<T> nrm_r_error;
std::deque<T> nrm_s_error;
// LOOP until satisfy convergence criteria
for (;; ++k) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"Step%d",k);
PUSH_RANGE(mystring,Step,1);
#endif
cml::vector_memcpy(&zprev, &z);
// Evaluate Proximal Operators g and f based upon chosen problem setup
PUSH_RANGE("Evaluate_fg",Evaluate_fg,9);
cml::blas_axpy(hdl, -kOne, &zt, &z); // -kOne*zt+z -> z
ProxEval(g_gpu, _rho, x.data, x12.data); // Evaluate g(rho,x)->x12 (x^{1/2} in paper)
ProxEval(f_gpu, _rho, y.data, y12.data); // Evaluate f(rho,y)->y12 (y^{1/2} in paper)
CUDA_CHECK_ERR(); POP_RANGE("Evaluate_fg",Evaluate_fg,9);
// Compute gap, optval, and tolerances.
PUSH_RANGE("gapoptvaltol",gapoptvaltol,9);
cml::blas_axpy(hdl, -kOne, &z12, &z); // -kOne*z12+z->z
cml::blas_dot(hdl, &z, &z12, &gap); // z*z12 -> gap
gap = std::abs(gap); // |gap| -> gap
eps_gap = sqrtmn_atol
+ _rel_tol * cml::blas_nrm2(hdl, &z)
* cml::blas_nrm2(hdl, &z12);
eps_pri = sqrtm_atol + _rel_tol * cml::blas_nrm2(hdl, &y12);
eps_dua = _rho * (sqrtn_atol + _rel_tol * cml::blas_nrm2(hdl, &x));
CUDA_CHECK_ERR(); POP_RANGE("gapoptvaltol",gapoptvaltol,9);
DEBUG_FPRINTF(stderr, "DEBUG1: %g %g\n", sqrtm_atol,
cml::blas_nrm2(hdl, &y12));
// Apply over relaxation (optional, can set kAlpha to 1, above, to disable)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.3
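    // The memcpy and two axpy calls below form
    //   ztemp = zt + kAlpha*z12 + (kOne - kAlpha)*zprev,
    // i.e. the over-relaxed iterate alpha*z^{k+1/2} + (1 - alpha)*z^k offset by
    // the scaled dual variable zt, which is then fed to the projection.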
PUSH_RANGE("orelax",orelax,9);
cml::vector_memcpy(&ztemp, &zt);
cml::blas_axpy(hdl, kAlpha, &z12, &ztemp);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &ztemp);
CUDA_CHECK_ERR(); POP_RANGE("orelax",orelax,9);
// Project onto y = Ax.
PUSH_RANGE("project",project,9);
T proj_tol = kProjTolMin / ::pow(static_cast<T>(k + 1), kProjTolPow);
proj_tol = ::max(proj_tol, kProjTolMax);
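    // Tolerance schedule: kProjTolMin / (k+1)^kProjTolPow, floored at kProjTolMax
    // (despite its name, kProjTolMax is the tightest tolerance of the schedule).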
// (x^{k+1},y^{k+1}) := Project(x^{k+1/2}+\tilde{x}^k , y^{k+1/2}+\tilde{y}^k)
// xtemp.data: \tilde{x}^k
// ytemp.data: \tilde{y}^k
// x.data: x^{k+1/2}
// y.data: y^{k+1/2}
_P.Project(xtemp.data, ytemp.data, kOne, x.data, y.data, proj_tol);
//hipDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
CUDA_CHECK_ERR(); POP_RANGE("project",project,9);
// Calculate residuals nrm_s (dual residual) and nrm_r (primary residual)
PUSH_RANGE("resid",resid,9);
cml::vector_memcpy(&ztemp, &zprev);
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &ztemp);
cml::vector_memcpy(&ztemp, &z12); // z12 has both x^{k+1/2} and y^{k+1/2}
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp (i.e. -x^k + x^{k+1/2} -> xtemp and -y^k + y^{k+1/2} -> ytemp)
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ztemp);
// Calculate exact residuals only if necessary.
bool exact = false;
if ((nrm_r < eps_pri && nrm_s < eps_dua) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
_A.Mul('n', kOne, x12.data, -kOne, ytemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ytemp);
if ((nrm_r < eps_pri) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
cml::blas_axpy(hdl, kOne, &zt, &ztemp);
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp);
_A.Mul('t', kOne, ytemp.data, kOne, xtemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &xtemp);
exact = true;
}
} CUDA_CHECK_ERR(); POP_RANGE("resid",resid,9);
bool stopearly = false;
if (_stop_early) {
// STOP EARLY CHECK
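      // Heuristic: keep rolling windows of the last QUEUELENGTH primal/dual
      // residual norms and of their running means; once the change of each
      // running mean across its window drops below that window's mean absolute
      // deviation (nrm_r_error / nrm_s_error), the residuals are considered
      // flat and the solve stops early.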
nrm_r_deque.push_back(nrm_r);
nrm_s_deque.push_back(nrm_s);
nrm_r_avg.push_back(
std::accumulate(nrm_r_deque.begin(), nrm_r_deque.end(), 0.0)
/ static_cast<T>(nrm_r_deque.size()));
nrm_s_avg.push_back(
std::accumulate(nrm_s_deque.begin(), nrm_s_deque.end(), 0.0)
/ static_cast<T>(nrm_s_deque.size()));
if (nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH) {
T errorlocal_r = 0;
T errorlocal_s = 0;
for (unsigned int ii = 0; ii < QUEUELENGTH; ii++) {
errorlocal_r += std::abs(nrm_r_avg[ii] - nrm_r_deque[ii]);
errorlocal_s += std::abs(nrm_s_avg[ii] - nrm_s_deque[ii]);
}
nrm_r_error.push_back(errorlocal_r / static_cast<T>(QUEUELENGTH));
nrm_s_error.push_back(errorlocal_s / static_cast<T>(QUEUELENGTH));
}
if (k > QUEUELENGTH && nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH
&& nrm_s_deque.size() >= QUEUELENGTH
&& nrm_s_avg.size() >= QUEUELENGTH && nrm_r_error.size() >= 1
&& nrm_s_error.size() >= 1
&& std::abs(nrm_r_avg.back() - nrm_r_avg.front())
< nrm_r_error.back()
&& std::abs(nrm_s_avg.back() - nrm_s_avg.front())
< nrm_s_error.back()) {
if(_verbose > 2){
Printf("Stopped Early at iteration=%d: %g %g %g : %g %g %g\n",
k, nrm_r_avg.back(), nrm_r_avg.front(),
nrm_r_error.back(), nrm_s_avg.back(), nrm_s_avg.front(),
nrm_s_error.back());
fflush(stdout);
}
stopearly = true;
}
if (nrm_r_deque.size() >= QUEUELENGTH) {
nrm_r_deque.pop_front();
}
if (nrm_s_deque.size() >= QUEUELENGTH) {
nrm_s_deque.pop_front();
}
if (nrm_r_avg.size() >= QUEUELENGTH) {
nrm_r_avg.pop_front();
}
if (nrm_s_avg.size() >= QUEUELENGTH) {
nrm_s_avg.pop_front();
}
if (nrm_r_error.size() >= QUEUELENGTH) {
nrm_r_error.pop_front();
}
if (nrm_s_error.size() >= QUEUELENGTH) {
nrm_s_error.pop_front();
}
}
// Evaluate stopping criteria.
converged = stopearly
|| (exact && nrm_r < eps_pri && nrm_s < eps_dua
&& (!_gap_stop || gap < eps_gap));
if ((_verbose > 3 && k % 1 == 0) || (_verbose > 2 && k % 10 == 0)
|| (_verbose > 1 && k % 100 == 0)
|| (_verbose > 1 && converged)) {
T optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
Printf("%5d : %.2e <? %.2e %.2e <? %.2e %.2e <? %.2e % .2e\n",
k, nrm_r, eps_pri, nrm_s, eps_dua, gap, eps_gap, optval);
fflush(stdout);
}
// Break if converged or there are nans
if (converged || k == _max_iter - 1) { // || cml::vector_any_isnan(&zt))
_final_iter = k;
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
break;
}
// Update dual variable.
PUSH_RANGE("update",update,9);
cml::blas_axpy(hdl, kAlpha, &z12, &zt);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &zt);
cml::blas_axpy(hdl, -kOne, &z, &zt);
CUDA_CHECK_ERR(); POP_RANGE("update",update,9);
// Adaptive rho (optional)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.1
// http://www.cs.umd.edu/sites/default/files/scholarly_papers/ZhengXu.pdf or https://arxiv.org/abs/1605.07246
// choose: 1 = H2O4GPU Boyd method
// choose: 2 = Original Boyd method of balancing residuals
// choose: 3 = Spectral method by Zheng et al. 2015
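    // For reference, the plain residual-balancing rule from the Boyd paper cited
    // above is roughly:
    //   if (nrm_r > mu * nrm_s)      { rho *= tau; zt /= tau; } // primal residual lagging
    //   else if (nrm_s > mu * nrm_r) { rho /= tau; zt *= tau; } // dual residual lagging
    // The variants below instead compare each residual against its tolerance
    // (xi * eps_pri, xi * eps_dua) before rescaling rho and zt.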
int whichadap = 1;
if (_adaptive_rho && _rho != 0) {
PUSH_RANGE("adaprho",adaprho,9);
if (whichadap == 1) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==1
else if (whichadap == 2) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else {
delta = kDeltaMin;
}CUDA_CHECK_ERR();
} // end adaptive_rho==2
else if (whichadap == 3) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==3
POP_RANGE("adaprho",adaprho,9);
} // end adaptive_rho
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
} // end for loop in k
// Get optimal value
_optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
// Check status
H2O4GPUStatus status;
if (!converged && k == _max_iter - 1)
status = H2O4GPU_MAX_ITER;
else if (!converged && k < _max_iter - 1)
status = H2O4GPU_NAN_FOUND;
else
status = H2O4GPU_SUCCESS;
// Get run time
_time = static_cast<T>(timer<double>() - t0);
// Print summary
if (_verbose > 0) {
Printf(__HBAR__
"Status: %s\n"
"Timing: Total = %3.2e s, Init = %3.2e s\n"
"Iter : %u\n", H2O4GPUStatusString(status).c_str(), _time, time_init,
k);
Printf(
__HBAR__
"Error Metrics:\n"
"Pri: "
"|Ax - y| / (abs_tol sqrt(m) / rel_tol + |y|) = %.2e (goal: %0.2e)\n"
"Dua: "
"|A'l + u| / (abs_tol sqrt(n) / rel_tol + |u|) = %.2e (goal: %0.2e)\n"
"Gap: "
"|x'u + y'l| / (abs_tol sqrt(m + n) / rel_tol + |x,u| |y,l|) = %.2e (goal: %0.2e, gap checked=%d)\n"
__HBAR__, _rel_tol * nrm_r / eps_pri, _rel_tol,
_rel_tol * nrm_s / eps_dua, _rel_tol, _rel_tol * gap / eps_gap,
_rel_tol, _gap_stop);
fflush(stdout);
}
// Scale x, y, lambda and mu for output.
PUSH_RANGE("Scale",Scale,1);
// xtemp and ytemp are views of ztemp, so these operations apply to xtemp and ytemp as well
cml::vector_memcpy(&ztemp, &zt); // zt->ztemp
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp); // -kOne*zprev+ztemp->ztemp
cml::blas_axpy(hdl, kOne, &z12, &ztemp); // kOne*z12+ztemp->ztemp
cml::blas_scal(hdl, -_rho, &ztemp); // -_rho*ztemp -> ztemp
// operations on limited views of ztemp
cml::vector_mul(&ytemp, &d); // ytemp*d -> ytemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
cml::vector<T> x12copy = cml::vector_calloc<T>(n);
cml::vector_memcpy(&x12copy, &x12); // copy de version first to GPU
T * dcopy = new T[m]();
cml::vector_memcpy(dcopy, &d); // copy d to CPU
cml::vector_div(&y12, &d); // y12/d -> y12
cml::vector_mul(&x12, &e); // x12*e -> x12
POP_RANGE("Scale",Scale,1);
// Copy results from GPU to CPU for output.
PUSH_RANGE("Copy",Copy,1);
cml::vector_memcpy(_x, &x12); // x12->_x (GPU->CPU with vector<T>* to T*)
cml::vector_memcpy(_xp, &x12); // x12->_xp (GPU->GPU but vector<T>* to T*)
cml::vector_memcpy(_y, &y12); // y12->_y
cml::vector_memcpy(_mu, &xtemp); // xtemp->_mu
cml::vector_memcpy(_lambda, &ytemp); // ytemp->_lambda
// compute train predictions from trainPred = Atrain.xsolution
_A.Mul('n', static_cast<T>(1.), x12copy.data, static_cast<T>(0.),
_trainPredsp); // _xp and _trainPredsp are both simple pointers on GPU
cml::vector_memcpy(m, 1, _trainPreds, _trainPredsp); // pointer on GPU to pointer on CPU
for (unsigned int i = 0; i < m; i++) {
_trainPreds[i] /= dcopy[i];
// DEBUG_FPRINTF(stderr,"Tp[%d]=%g\n",i,_trainPreds[i]);
}
if (dcopy)
delete[] dcopy;
if (x12copy.data)
cml::vector_free(&x12copy);
if (mvalid > 0) {
double tpre = timer<double>();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), _xp, static_cast<T>(0.),
_validPredsp);
double tpost = timer<double>();
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
double tpost2cpu = timer<double>();
#ifdef DEBUG
fprintf(stderr,"PREDICT TIME: %g %g\n",tpost-tpre,tpost2cpu-tpre); fflush(stderr);
#endif
}
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Store z.
cml::vector_memcpy(&z, &zprev); // zprev->z
// Free memory.
cml::vector_free(&z12);
cml::vector_free(&zprev);
cml::vector_free(&ztemp);
if (hdl)
hipblasDestroy(hdl);
CUDA_CHECK_ERR(); POP_RANGE("Copy",Copy,1);
// POP_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
return status;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init_Predict() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(hipSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(hipGetDevice(&devID));
hipDeviceProp_t props;
// get device properties
CUDACHECK(hipGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // single-device case (_nDev == 1): force the loop onto the chosen device _wDev
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, i));
CUDACHECK(hipSetDevice(i));
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
_comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev); // assign the member; a local declaration here would shadow it and leak
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
hipDeviceProp_t prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
hipMalloc(&_xp, (n) * sizeof(T));
hipMalloc(&_trainPredsp, (m) * sizeof(T));
hipMalloc(&_validPredsp, (mvalid) * sizeof(T));
hipMemset(_xp, 0, (n) * sizeof(T));
hipMemset(_trainPredsp, 0, (m) * sizeof(T));
hipMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
#ifdef DEBUG
printf("Pred: Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::Predict() {
double t0 = timer<double>();
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init_Predict();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(hipSetDevice(_wDev));
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// copy over X (assume called SetInitX) directly from CPU to GPU during fit
cml::vector<T> xtemp = cml::vector_calloc<T>(n);
CUDA_CHECK_ERR();
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
CUDA_CHECK_ERR();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), xtemp.data, static_cast<T>(0.),
_validPredsp);
CUDA_CHECK_ERR();
// copy back to CPU
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
CUDA_CHECK_ERR();
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Free memory.
cml::vector_free(&xtemp);
CUDA_CHECK_ERR();
return 0;
}
template<typename T, typename M, typename P>
void H2O4GPU<T, M, P>::ResetX(void) {
if (!_done_init)
_Init();
CUDACHECK(hipSetDevice(_wDev));
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
DEBUG_FPRINTF(stderr, "in h2o4gpu ResetX: m=%d n=%d\n", (int)m, (int)n);
hipMemset(_z, 0, (m + n) * sizeof(T));
hipMemset(_zt, 0, (m + n) * sizeof(T));
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::~H2O4GPU() {
CUDACHECK(hipSetDevice(_wDev));
if(1){
if (_z)
CUDACHECK(hipFree(_z));
if (_zt)
CUDACHECK(hipFree(_zt));
if (_xp)
CUDACHECK(hipFree(_xp));
if (_trainPredsp)
CUDACHECK(hipFree(_trainPredsp));
if (_validPredsp)
CUDACHECK(hipFree(_validPredsp));
CUDA_CHECK_ERR();
}
_z = _zt = _xp = _trainPredsp = _validPredsp = 0;
#ifdef USE_NCCL2
for(int i=0; i<_nDev; ++i)
ncclCommDestroy(_comms[i]);
free(_comms);
#endif
if (_x)
delete[] _x;
if (_y)
delete[] _y;
if (_mu)
delete[] _mu;
if (_lambda)
delete[] _lambda;
if (_trainPreds)
delete[] _trainPreds;
if (_validPreds)
delete[] _validPreds;
_x = _y = _mu = _lambda = _trainPreds = _validPreds = 0;
}
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class H2O4GPU<double, MatrixDense<double>,
ProjectorDirect<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixDense<double>,
ProjectorCgls<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixSparse<double>,
ProjectorCgls<double, MatrixSparse<double> > > ;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class H2O4GPU<float, MatrixDense<float>,
ProjectorDirect<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixDense<float>,
ProjectorCgls<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixSparse<float>,
ProjectorCgls<float, MatrixSparse<float> > > ;
#endif
} // namespace h2o4gpu
| 15cadc9949e9133abaf249e4f7b22bcd57d5f98e.cu | /*!
* Modifications Copyright 2017-2018 H2O.ai, Inc.
*/
#include "solver/glm.h"
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <algorithm>
#include <limits>
#include <deque>
#include <numeric>
#include "cml/cml_blas.cuh"
#include "cml/cml_vector.cuh"
#include "interface_defs.h"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "matrix/matrix_sparse.h"
#include "projector/projector.h"
#include "projector/projector_direct.h"
#include "projector/projector_cgls.h"
#include "util.h"
#include "cuda_utils.h"
#include "timer.h"
//#include "kmeans.h"
typedef struct {
double* sendBuff;
double* recvBuff;
int size;
cudaStream_t stream;
} PerThreadData;
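// PerThreadData is not referenced in this translation unit; it appears to be
// scaffolding for per-device NCCL transfers (a send/receive buffer pair plus a
// stream per thread).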
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpu {
namespace {
template<typename T, typename Op>
struct ApplyOp: thrust::binary_function<FunctionObj<T>, FunctionObj<T>, T> {
Op binary_op;
ApplyOp(Op binary_op) :
binary_op(binary_op) {
}
__host__ __device__ FunctionObj<T> operator()(FunctionObj<T> &h, T x) {
h.a = binary_op(h.a, x);
h.d = binary_op(h.d, x);
h.e = binary_op(binary_op(h.e, x), x);
return h;
}
};
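// ApplyOp folds a scalar into a FunctionObj by scaling a and d once and e twice:
// given the FunctionObj form c*f(a*t - b) + d*t + e*t^2/2 from prox_lib.h, the
// thrust::divides version maps h(t) -> h(t/s) and the thrust::multiplies version
// maps h(t) -> h(s*t); Solve() uses this to absorb the equilibration factors
// d and e into f and g.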
} // namespace
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(int sharedA, int me, int wDev, const M &A) :
_A(sharedA, me, wDev, A), _P(wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(wDev)
#ifdef USE_NCCL2
,_comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(const M &A) :
_A(A._sharedA, A._me, A._wDev, A), _P(_A._wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(_A._wDev)
#ifdef USE_NCCL2
,_comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(cudaSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(cudaGetDevice(&devID));
cudaDeviceProp props;
// get device properties
CUDACHECK(cudaGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // single-device case (_nDev == 1): force the loop onto the chosen device _wDev
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, i));
CUDACHECK(cudaSetDevice(i));
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
_comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev); // assign the member; a local declaration here would shadow it and leak
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
cudaDeviceProp prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
cudaMalloc(&_z, (m + n) * sizeof(T));
cudaMemset(_z, 0, (m + n) * sizeof(T));
cudaMalloc(&_zt, (m + n) * sizeof(T));
cudaMemset(_zt, 0, (m + n) * sizeof(T));
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
cudaMalloc(&_xp, (n) * sizeof(T));
cudaMalloc(&_trainPredsp, (m) * sizeof(T));
cudaMalloc(&_validPredsp, (mvalid) * sizeof(T));
cudaMemset(_xp, 0, (n) * sizeof(T));
cudaMemset(_trainPredsp, 0, (m) * sizeof(T));
cudaMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
PUSH_RANGE("Eq",Eq,1);
_A.Equil(_equil);
POP_RANGE("Eq",Eq,1);
// PUSH_RANGE("Init1",Init1,1);
_P.Init();
CUDA_CHECK_ERR();
// POP_RANGE("Init1",Init1,1);
#ifdef DEBUG
printf("Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
H2O4GPUStatus H2O4GPU<T, M, P>::Solve(const std::vector<FunctionObj<T> > &f,
const std::vector<FunctionObj<T> > &g) {
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(cudaSetDevice(_wDev));
double t0 = timer<double>();
// TODO: Constants are set arbitrarily based upon limited experiments in academic papers
// Constants for adaptive-rho and over-relaxation.
const T kDeltaMin = static_cast<T>(1.05); // for adaptive rho and rescaling
const T kGamma = static_cast<T>(1.01); // for adaptive rho and rescaling
const T kTau = static_cast<T>(0.8); // for adaptive rho and rescaling
const T kAlpha = static_cast<T>(1.7); // set to 1.0 to disable over-relaxation technique, normally 1.5-1.8 and was set to 1.7
const T kKappa = static_cast<T>(0.9); // for adaptive rho and rescaling
const T kOne = static_cast<T>(1.0); // definition
const T kZero = static_cast<T>(0.0); // definition
const T kProjTolMax = static_cast<T>(1e-6); // tightest projection tolerance (used as the floor of the schedule below)
const T kProjTolMin = static_cast<T>(1e-2); // loosest (initial) projection tolerance
const T kProjTolPow = static_cast<T>(1.3); // decay power of the projection-tolerance schedule
const T kProjTolIni = static_cast<T>(1e-5); // projection tolerance for the initial-guess iterations
const bool use_exact_stop = true; // false gives worse train error and runs to the maximum number of iterations with simple.R
// fprintf(stderr,"solve _data=%p\n",_A._data); fflush(stderr);
// fprintf(stderr,"solve _datay=%p\n",_A._datay); fflush(stderr);
// Notes on variable names:
//
// Original Boyd ADMM paper solves:
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf
// Minimize: f(x) + g(z)
// Subject to: Ax + Bz = c
// Primary variable: x
// Dual variable: z
// Step size: \rho
// Where for Lasso: f(x) = (1/2)||x-b||_2^2 and g(z) = \lambda||z||_1 with constraint x=Az
//
// H2O4GPU paper and code:
// http://foges.github.io/h2o4gpu/ and http://stanford.edu/~boyd/papers/h2o4gpu.html
// Minimize: f(y) + g(x) for a variety (but limited set) of f and g shown in src/include/prox_lib.h
// Subject to: y = Ax (always)
// Where for Lasso: f(y) = (1/2)||y-b||_2^2 and g(x) = \lambda||x||_1 and constraint is y=Ax
// Primary variable: y
// Dual variable: x
// Step size or Proximal parameter: \rho
// Intermediate variable: z
// Internally h2o4gpu code uses \mu and \nu scaled variables, performs pre-conditioning using e and d.
// \lambda_{max} = ||A^T b|| makes sense if have (1/2) in front of f(y) for Lasso
//
// H2O4GPU overall steps:
// 1) Precondition A using d and e and renormalize variables and all equations using d and e
// 2) Compute Gramian: A^T A only once
// 3) Cholesky of gram: Only compute cholesky once -- s and info->s in Project just kOne=1 and just ensure GPU has cholesky already. Could have put into Init with Gramian)
// 4) Project: Solve L L^T x = b for x by forward and backward solve (Ly=b for y and then y=L^T x for x)
// 5) Repeat #4, until convergence from primary (min Ax-b) and dual (min f(y)+g(x)) residuals
// Extract values from h2o4gpu_data
PUSH_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
thrust::device_vector<FunctionObj<T> > f_gpu = f;
thrust::device_vector<FunctionObj<T> > g_gpu = g;
// TODO: Need to give scale to these
// const T kRhoMin = static_cast<T>(1e-4); // lower range for adaptive rho
// const T kRhoMax = static_cast<T>(1e4); // upper range for adaptive rho
const T kRhoMin = static_cast<T>(std::numeric_limits<T>::epsilon()); // lower range for adaptive rho
const T kRhoMax = static_cast<T>(1.0 / kRhoMin); // upper range for adaptive rho
POP_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
PUSH_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
// Create cuBLAS handle.
cublasHandle_t hdl;
cublasCreate(&hdl);
CUDA_CHECK_ERR();
// Allocate data for ADMM variables.
cml::vector<T> de = cml::vector_view_array(_A._de, m + n);
cml::vector<T> z = cml::vector_view_array(_z, m + n);
cml::vector<T> zt = cml::vector_view_array(_zt, m + n);
cml::vector<T> zprev = cml::vector_calloc<T>(m + n);
cml::vector<T> ztemp = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
CUDA_CHECK_ERR();
// Create views for x and y components (same memory space used, not value copy)
cml::vector<T> d = cml::vector_subvector(&de, 0, m);
cml::vector<T> e = cml::vector_subvector(&de, m, n);
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
cml::vector<T> xprev = cml::vector_subvector(&zprev, 0, n);
cml::vector<T> yprev = cml::vector_subvector(&zprev, n, m);
cml::vector<T> xtemp = cml::vector_subvector(&ztemp, 0, n);
cml::vector<T> ytemp = cml::vector_subvector(&ztemp, n, m);
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
PUSH_RANGE("H2O4GPUScale",H2O4GPUScale,5);
// Scale f and g to account for diagonal scaling e and d.
// f/d -> f
thrust::transform(f_gpu.begin(), f_gpu.end(),
thrust::device_pointer_cast(d.data), f_gpu.begin(),
ApplyOp<T, thrust::divides<T> >(thrust::divides<T>()));
// g*e -> g
thrust::transform(g_gpu.begin(), g_gpu.end(),
thrust::device_pointer_cast(e.data), g_gpu.begin(),
ApplyOp<T, thrust::multiplies<T> >(thrust::multiplies<T>()));
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUScale",H2O4GPUScale,5);
PUSH_RANGE("Lambda",Lambda,6);
// Initialize (x, lambda) from (x0, lambda0).
if (_init_x) {
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
_A.Mul('n', kOne, xtemp.data, kZero, ytemp.data); // kOne*A*x + kZero*y -> y
wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
cml::vector_memcpy(&z, &ztemp); // ztemp->z (xtemp and ytemp are views of ztemp)
CUDA_CHECK_ERR();
}
if (_init_lambda) {
cml::vector_memcpy(&ytemp, _lambda); // _lambda->ytemp
cml::vector_div(&ytemp, &d); // ytemp/d -> ytemp
_A.Mul('t', -kOne, ytemp.data, kZero, xtemp.data); // -kOne*y+kZero*x -> x
wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &ztemp); // ztemp = ztemp * (-kOne/_rho)
else
cml::blas_scal(hdl, kZero, &ztemp); // _rho == 0 guard: ztemp = 0
cml::vector_memcpy(&zt, &ztemp); // ztemp->zt
CUDA_CHECK_ERR();
} POP_RANGE("Lambda",Lambda,6);
PUSH_RANGE("Guess",Guess,7);
// Make an initial guess for (x0 or lambda0).
if (_init_x && !_init_lambda) {
// Alternating projections to satisfy
// 1. \lambda \in \partial f(y), \mu \in \partial g(x)
// 2. \mu = -A^T\lambda
cml::vector_set_all(&zprev, kZero); // zprev = kZero
for (unsigned int i = 0; i < kInitIter; ++i) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"GStep%d",i);
PUSH_RANGE(mystring,GStep,1);
#endif
ProjSubgradEval(g_gpu, xprev.data, x.data, xtemp.data);
ProjSubgradEval(f_gpu, yprev.data, y.data, ytemp.data);
_P.Project(xtemp.data, ytemp.data, kOne, xprev.data, yprev.data,
kProjTolIni);
wrapcudaDeviceSynchronize(); // not needed, as blas's are cuda call and will follow sequentially on device
CUDA_CHECK_ERR();
cml::blas_axpy(hdl, -kOne, &ztemp, &zprev); // alpha*X + Y -> Y
cml::blas_scal(hdl, -kOne, &zprev);
#ifdef USE_NVTX
POP_RANGE(mystring,GStep,1);
#endif
}
// xt = -1 / \rho * \mu, yt = -1 / \rho * \lambda.
cml::vector_memcpy(&zt, &zprev); // zprev->zt
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &zt);
else
cml::blas_scal(hdl, kZero, &zt);
} else if (_init_lambda && !_init_x) {
ASSERT(false);
}
_init_x = _init_lambda = false;
POP_RANGE("Guess",Guess,7);
// Save initialization time.
double time_init = timer<double>() - t0;
#ifdef DEBUG
printf("Time to initialize: %f\n", time_init);
#endif
// Signal start of execution.
if (_verbose > 0) {
#pragma omp critical
{
printMe(std::cout, f[1].a, f[1].b, f[1].c, f[1].d, f[1].e, g[1].a,
g[1].b, g[1].c, g[1].d, g[1].e); //debugging only: print the second since the first can be for intercept (which is then 0)
//printData(std::cout); //only works for data in host memory!
}
}
if (_verbose > 1) {
Printf(
__HBAR__
" Iter | pri res | pri tol | dua res | dua tol | gap | eps gap |"
" pri obj\n" __HBAR__);
}
// Initialize scalars.
T sqrtn_atol = std::sqrt(static_cast<T>(n)) * _abs_tol;
T sqrtm_atol = std::sqrt(static_cast<T>(m)) * _abs_tol;
T sqrtmn_atol = std::sqrt(static_cast<T>(m + n)) * _abs_tol;
T delta = kDeltaMin, xi = static_cast<T>(1.0);
unsigned int k = 0u, kd = 0u, ku = 0u;
bool converged = false;
T nrm_r, nrm_s, gap, eps_gap, eps_pri, eps_dua;
// Stop early setup
unsigned int QUEUELENGTH = 10;
std::deque<T> nrm_r_deque;
std::deque<T> nrm_s_deque;
std::deque<T> nrm_r_avg;
std::deque<T> nrm_s_avg;
std::deque<T> nrm_r_error;
std::deque<T> nrm_s_error;
// LOOP until satisfy convergence criteria
for (;; ++k) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"Step%d",k);
PUSH_RANGE(mystring,Step,1);
#endif
cml::vector_memcpy(&zprev, &z);
// Evaluate Proximal Operators g and f based upon chosen problem setup
PUSH_RANGE("Evaluate_fg",Evaluate_fg,9);
cml::blas_axpy(hdl, -kOne, &zt, &z); // -kOne*zt+z -> z
ProxEval(g_gpu, _rho, x.data, x12.data); // Evaluate g(rho,x)->x12 (x^{1/2} in paper)
ProxEval(f_gpu, _rho, y.data, y12.data); // Evaluate f(rho,y)->y12 (y^{1/2} in paper)
CUDA_CHECK_ERR(); POP_RANGE("Evaluate_fg",Evaluate_fg,9);
// Compute gap, optval, and tolerances.
PUSH_RANGE("gapoptvaltol",gapoptvaltol,9);
cml::blas_axpy(hdl, -kOne, &z12, &z); // -kOne*z12+z->z
cml::blas_dot(hdl, &z, &z12, &gap); // z*z12 -> gap
gap = std::abs(gap); // |gap| -> gap
eps_gap = sqrtmn_atol
+ _rel_tol * cml::blas_nrm2(hdl, &z)
* cml::blas_nrm2(hdl, &z12);
eps_pri = sqrtm_atol + _rel_tol * cml::blas_nrm2(hdl, &y12);
eps_dua = _rho * (sqrtn_atol + _rel_tol * cml::blas_nrm2(hdl, &x));
CUDA_CHECK_ERR(); POP_RANGE("gapoptvaltol",gapoptvaltol,9);
DEBUG_FPRINTF(stderr, "DEBUG1: %g %g\n", sqrtm_atol,
cml::blas_nrm2(hdl, &y12));
// Apply over relaxation (optional, can set kAlpha to 1, above, to disable)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.3
PUSH_RANGE("orelax",orelax,9);
cml::vector_memcpy(&ztemp, &zt);
cml::blas_axpy(hdl, kAlpha, &z12, &ztemp);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &ztemp);
CUDA_CHECK_ERR(); POP_RANGE("orelax",orelax,9);
// Project onto y = Ax.
PUSH_RANGE("project",project,9);
T proj_tol = kProjTolMin / std::pow(static_cast<T>(k + 1), kProjTolPow);
proj_tol = std::max(proj_tol, kProjTolMax);
// (x^{k+1},y^{k+1}) := Project(x^{k+1/2}+\tilde{x}^k , y^{k+1/2}+\tilde{y}^k)
// xtemp.data: \tilde{x}^k
// ytemp.data: \tilde{y}^k
// x.data: x^{k+1/2}
// y.data: y^{k+1/2}
_P.Project(xtemp.data, ytemp.data, kOne, x.data, y.data, proj_tol);
//cudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
CUDA_CHECK_ERR(); POP_RANGE("project",project,9);
// Calculate residuals nrm_s (dual residual) and nrm_r (primary residual)
PUSH_RANGE("resid",resid,9);
cml::vector_memcpy(&ztemp, &zprev);
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &ztemp);
cml::vector_memcpy(&ztemp, &z12); // z12 has both x^{k+1/2} and y^{k+1/2}
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp (i.e. -x^k + x^{k+1/2} -> xtemp and -y^k + y^{k+1/2} -> ytemp)
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ztemp);
// Calculate exact residuals only if necessary.
bool exact = false;
if ((nrm_r < eps_pri && nrm_s < eps_dua) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
_A.Mul('n', kOne, x12.data, -kOne, ytemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ytemp);
if ((nrm_r < eps_pri) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
cml::blas_axpy(hdl, kOne, &zt, &ztemp);
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp);
_A.Mul('t', kOne, ytemp.data, kOne, xtemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &xtemp);
exact = true;
}
} CUDA_CHECK_ERR(); POP_RANGE("resid",resid,9);
bool stopearly = false;
if (_stop_early) {
// STOP EARLY CHECK
nrm_r_deque.push_back(nrm_r);
nrm_s_deque.push_back(nrm_s);
nrm_r_avg.push_back(
std::accumulate(nrm_r_deque.begin(), nrm_r_deque.end(), 0.0)
/ static_cast<T>(nrm_r_deque.size()));
nrm_s_avg.push_back(
std::accumulate(nrm_s_deque.begin(), nrm_s_deque.end(), 0.0)
/ static_cast<T>(nrm_s_deque.size()));
if (nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH) {
T errorlocal_r = 0;
T errorlocal_s = 0;
for (unsigned int ii = 0; ii < QUEUELENGTH; ii++) {
errorlocal_r += std::abs(nrm_r_avg[ii] - nrm_r_deque[ii]);
errorlocal_s += std::abs(nrm_s_avg[ii] - nrm_s_deque[ii]);
}
nrm_r_error.push_back(errorlocal_r / static_cast<T>(QUEUELENGTH));
nrm_s_error.push_back(errorlocal_s / static_cast<T>(QUEUELENGTH));
}
if (k > QUEUELENGTH && nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH
&& nrm_s_deque.size() >= QUEUELENGTH
&& nrm_s_avg.size() >= QUEUELENGTH && nrm_r_error.size() >= 1
&& nrm_s_error.size() >= 1
&& std::abs(nrm_r_avg.back() - nrm_r_avg.front())
< nrm_r_error.back()
&& std::abs(nrm_s_avg.back() - nrm_s_avg.front())
< nrm_s_error.back()) {
if(_verbose > 2){
Printf("Stopped Early at iteration=%d: %g %g %g : %g %g %g\n",
k, nrm_r_avg.back(), nrm_r_avg.front(),
nrm_r_error.back(), nrm_s_avg.back(), nrm_s_avg.front(),
nrm_s_error.back());
fflush(stdout);
}
stopearly = true;
}
if (nrm_r_deque.size() >= QUEUELENGTH) {
nrm_r_deque.pop_front();
}
if (nrm_s_deque.size() >= QUEUELENGTH) {
nrm_s_deque.pop_front();
}
if (nrm_r_avg.size() >= QUEUELENGTH) {
nrm_r_avg.pop_front();
}
if (nrm_s_avg.size() >= QUEUELENGTH) {
nrm_s_avg.pop_front();
}
if (nrm_r_error.size() >= QUEUELENGTH) {
nrm_r_error.pop_front();
}
if (nrm_s_error.size() >= QUEUELENGTH) {
nrm_s_error.pop_front();
}
}
// Evaluate stopping criteria.
converged = stopearly
|| (exact && nrm_r < eps_pri && nrm_s < eps_dua
&& (!_gap_stop || gap < eps_gap));
if ((_verbose > 3 && k % 1 == 0) || (_verbose > 2 && k % 10 == 0)
|| (_verbose > 1 && k % 100 == 0)
|| (_verbose > 1 && converged)) {
T optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
Printf("%5d : %.2e <? %.2e %.2e <? %.2e %.2e <? %.2e % .2e\n",
k, nrm_r, eps_pri, nrm_s, eps_dua, gap, eps_gap, optval);
fflush(stdout);
}
// Break if converged or there are nans
if (converged || k == _max_iter - 1) { // || cml::vector_any_isnan(&zt))
_final_iter = k;
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
break;
}
// Update dual variable.
PUSH_RANGE("update",update,9);
cml::blas_axpy(hdl, kAlpha, &z12, &zt);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &zt);
cml::blas_axpy(hdl, -kOne, &z, &zt);
CUDA_CHECK_ERR(); POP_RANGE("update",update,9);
// Adaptive rho (optional)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.1
// http://www.cs.umd.edu/sites/default/files/scholarly_papers/ZhengXu.pdf or https://arxiv.org/abs/1605.07246
// choose: 1 = H2O4GPU Boyd method
// choose: 2 = Original Boyd method of balancing residuals
// choose: 3 = Spectral method by Zheng et al. 2015
int whichadap = 1;
if (_adaptive_rho && _rho != 0) {
PUSH_RANGE("adaprho",adaprho,9);
if (whichadap == 1) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==1
else if (whichadap == 2) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else {
delta = kDeltaMin;
}CUDA_CHECK_ERR();
} // end adaptive_rho==2
else if (whichadap == 3) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==3
POP_RANGE("adaprho",adaprho,9);
} // end adaptive_rho
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
} // end for loop in k
// Get optimal value
_optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
// Check status
H2O4GPUStatus status;
if (!converged && k == _max_iter - 1)
status = H2O4GPU_MAX_ITER;
else if (!converged && k < _max_iter - 1)
status = H2O4GPU_NAN_FOUND;
else
status = H2O4GPU_SUCCESS;
// Get run time
_time = static_cast<T>(timer<double>() - t0);
// Print summary
if (_verbose > 0) {
Printf(__HBAR__
"Status: %s\n"
"Timing: Total = %3.2e s, Init = %3.2e s\n"
"Iter : %u\n", H2O4GPUStatusString(status).c_str(), _time, time_init,
k);
Printf(
__HBAR__
"Error Metrics:\n"
"Pri: "
"|Ax - y| / (abs_tol sqrt(m) / rel_tol + |y|) = %.2e (goal: %0.2e)\n"
"Dua: "
"|A'l + u| / (abs_tol sqrt(n) / rel_tol + |u|) = %.2e (goal: %0.2e)\n"
"Gap: "
"|x'u + y'l| / (abs_tol sqrt(m + n) / rel_tol + |x,u| |y,l|) = %.2e (goal: %0.2e, gap checked=%d)\n"
__HBAR__, _rel_tol * nrm_r / eps_pri, _rel_tol,
_rel_tol * nrm_s / eps_dua, _rel_tol, _rel_tol * gap / eps_gap,
_rel_tol, _gap_stop);
fflush(stdout);
}
// Scale x, y, lambda and mu for output.
PUSH_RANGE("Scale",Scale,1);
// xtemp and ytemp are views of ztemp, so these operations apply to xtemp and ytemp as well
cml::vector_memcpy(&ztemp, &zt); // zt->ztemp
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp); // -kOne*zprev+ztemp->ztemp
cml::blas_axpy(hdl, kOne, &z12, &ztemp); // kOne*z12+ztemp->ztemp
cml::blas_scal(hdl, -_rho, &ztemp); // -_rho*ztemp -> ztemp
// operations on limited views of ztemp
cml::vector_mul(&ytemp, &d); // ytemp*d -> ytemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
cml::vector<T> x12copy = cml::vector_calloc<T>(n);
cml::vector_memcpy(&x12copy, &x12); // copy de version first to GPU
T * dcopy = new T[m]();
cml::vector_memcpy(dcopy, &d); // copy d to CPU
cml::vector_div(&y12, &d); // y12/d -> y12
cml::vector_mul(&x12, &e); // x12*e -> x12
POP_RANGE("Scale",Scale,1);
// Copy results from GPU to CPU for output.
PUSH_RANGE("Copy",Copy,1);
cml::vector_memcpy(_x, &x12); // x12->_x (GPU->CPU with vector<T>* to T*)
cml::vector_memcpy(_xp, &x12); // x12->_xp (GPU->GPU but vector<T>* to T*)
cml::vector_memcpy(_y, &y12); // y12->_y
cml::vector_memcpy(_mu, &xtemp); // xtemp->_mu
cml::vector_memcpy(_lambda, &ytemp); // ytemp->_lambda
// compute train predictions from trainPred = Atrain.xsolution
_A.Mul('n', static_cast<T>(1.), x12copy.data, static_cast<T>(0.),
_trainPredsp); // _xp and _trainPredsp are both simple pointers on GPU
cml::vector_memcpy(m, 1, _trainPreds, _trainPredsp); // pointer on GPU to pointer on CPU
for (unsigned int i = 0; i < m; i++) {
_trainPreds[i] /= dcopy[i];
// DEBUG_FPRINTF(stderr,"Tp[%d]=%g\n",i,_trainPreds[i]);
}
if (dcopy)
delete[] dcopy;
if (x12copy.data)
cml::vector_free(&x12copy);
if (mvalid > 0) {
double tpre = timer<double>();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), _xp, static_cast<T>(0.),
_validPredsp);
double tpost = timer<double>();
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
double tpost2cpu = timer<double>();
#ifdef DEBUG
fprintf(stderr,"PREDICT TIME: %g %g\n",tpost-tpre,tpost2cpu-tpre); fflush(stderr);
#endif
}
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Store z.
cml::vector_memcpy(&z, &zprev); // zprev->z
// Free memory.
cml::vector_free(&z12);
cml::vector_free(&zprev);
cml::vector_free(&ztemp);
if (hdl)
cublasDestroy(hdl);
CUDA_CHECK_ERR(); POP_RANGE("Copy",Copy,1);
// POP_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
return status;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init_Predict() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(cudaSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(cudaGetDevice(&devID));
cudaDeviceProp props;
// get device properties
CUDACHECK(cudaGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // single-device case (_nDev == 1): force the loop onto the chosen device _wDev
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, i));
CUDACHECK(cudaSetDevice(i));
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
_comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev); // assign the member; a local declaration here would shadow it and leak
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
cudaDeviceProp prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
cudaMalloc(&_xp, (n) * sizeof(T));
cudaMalloc(&_trainPredsp, (m) * sizeof(T));
cudaMalloc(&_validPredsp, (mvalid) * sizeof(T));
cudaMemset(_xp, 0, (n) * sizeof(T));
cudaMemset(_trainPredsp, 0, (m) * sizeof(T));
cudaMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
#ifdef DEBUG
printf("Pred: Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::Predict() {
double t0 = timer<double>();
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init_Predict();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(cudaSetDevice(_wDev));
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// copy over X (assume called SetInitX) directly from CPU to GPU during fit
cml::vector<T> xtemp = cml::vector_calloc<T>(n);
CUDA_CHECK_ERR();
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
CUDA_CHECK_ERR();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), xtemp.data, static_cast<T>(0.),
_validPredsp);
CUDA_CHECK_ERR();
// copy back to CPU
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
CUDA_CHECK_ERR();
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Free memory.
cml::vector_free(&xtemp);
CUDA_CHECK_ERR();
return 0;
}
template<typename T, typename M, typename P>
void H2O4GPU<T, M, P>::ResetX(void) {
if (!_done_init)
_Init();
CUDACHECK(cudaSetDevice(_wDev));
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
DEBUG_FPRINTF(stderr, "in h2o4gpu ResetX: m=%d n=%d\n", (int)m, (int)n);
cudaMemset(_z, 0, (m + n) * sizeof(T));
cudaMemset(_zt, 0, (m + n) * sizeof(T));
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::~H2O4GPU() {
CUDACHECK(cudaSetDevice(_wDev));
if(1){
if (_z)
CUDACHECK(cudaFree(_z));
if (_zt)
CUDACHECK(cudaFree(_zt));
if (_xp)
CUDACHECK(cudaFree(_xp));
if (_trainPredsp)
CUDACHECK(cudaFree(_trainPredsp));
if (_validPredsp)
CUDACHECK(cudaFree(_validPredsp));
CUDA_CHECK_ERR();
}
_z = _zt = _xp = _trainPredsp = _validPredsp = 0;
#ifdef USE_NCCL2
for(int i=0; i<_nDev; ++i)
ncclCommDestroy(_comms[i]);
free(_comms);
#endif
if (_x)
delete[] _x;
if (_y)
delete[] _y;
if (_mu)
delete[] _mu;
if (_lambda)
delete[] _lambda;
if (_trainPreds)
delete[] _trainPreds;
if (_validPreds)
delete[] _validPreds;
_x = _y = _mu = _lambda = _trainPreds = _validPreds = 0;
}
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class H2O4GPU<double, MatrixDense<double>,
ProjectorDirect<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixDense<double>,
ProjectorCgls<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixSparse<double>,
ProjectorCgls<double, MatrixSparse<double> > > ;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class H2O4GPU<float, MatrixDense<float>,
ProjectorDirect<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixDense<float>,
ProjectorCgls<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixSparse<float>,
ProjectorCgls<float, MatrixSparse<float> > > ;
#endif
} // namespace h2o4gpu
|
be8e7a18a6e7f05f3a21991b57ae97a83f743601.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Automatically-generated kernel for %(name)s
For multivariate distributions, coordinates threads to utilize shared memory
TODO: How to avoid bank conflicts
TODO: How to ensure coalescence
*/
__global__ void k_%(name)s(float* g_output,
float* g_data,
float* g_params,
int data_per_block,
int params_per_block,
int data_rows,
int data_stride,
int data_cols,
int params_rows,
int params_stride) {
unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int rel_param = tid / data_per_block;
unsigned int rel_data = tid - rel_param * data_per_block;
unsigned int obs_num = data_per_block * blockIdx.x + rel_data;
unsigned int param_num = params_per_block * blockIdx.y + rel_param;
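// each block covers a data_per_block x params_per_block tile of the output;
// the flat thread id is decomposed into (rel_param, rel_data) coordinates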
// set up shared data
extern __shared__ float shared_data[];
float* sh_params = shared_data;
float* sh_data = sh_params + params_per_block * params_stride;
float* sh_result = sh_data + data_per_block * data_stride;
copy_chunks(g_data + data_per_block * blockIdx.x * data_stride,
sh_data, tid,
min(data_rows - data_per_block * blockIdx.x,
data_per_block) * data_stride);
copy_chunks(g_params + params_per_block * blockIdx.y * params_stride,
sh_params, tid,
min(params_per_block,
params_rows - params_per_block * blockIdx.y) * params_stride);
__syncthreads();
// allocated enough shared memory so that this will not walk out of bounds
// no matter what, though some of the results will be garbage
sh_result[tid] = %(name)s(sh_data + rel_data * data_stride,
sh_params + rel_param * params_stride,
data_cols);
__syncthreads();
unsigned int result_idx = data_rows * param_num + obs_num;
// unsigned int result_idx = obs_num * data_cols + param_num
// g_output is column-major, so this will then coalesce
if (obs_num < data_rows && param_num < params_rows) {
g_output[result_idx] = sh_result[tid];
}
}
// foo
| be8e7a18a6e7f05f3a21991b57ae97a83f743601.cu | /*
Automatically-generated kernel for %(name)s
For multivariate distributions, coordinates threads to utilize shared memory
TODO: How to avoid bank conflicts
TODO: How to ensure coalescence
*/
__global__ void k_%(name)s(float* g_output,
float* g_data,
float* g_params,
int data_per_block,
int params_per_block,
int data_rows,
int data_stride,
int data_cols,
int params_rows,
int params_stride) {
unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int rel_param = tid / data_per_block;
unsigned int rel_data = tid - rel_param * data_per_block;
unsigned int obs_num = data_per_block * blockIdx.x + rel_data;
unsigned int param_num = params_per_block * blockIdx.y + rel_param;
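// each block covers a data_per_block x params_per_block tile of the output;
// the flat thread id is decomposed into (rel_param, rel_data) coordinates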
// set up shared data
extern __shared__ float shared_data[];
float* sh_params = shared_data;
float* sh_data = sh_params + params_per_block * params_stride;
float* sh_result = sh_data + data_per_block * data_stride;
copy_chunks(g_data + data_per_block * blockIdx.x * data_stride,
sh_data, tid,
min(data_rows - data_per_block * blockIdx.x,
data_per_block) * data_stride);
copy_chunks(g_params + params_per_block * blockIdx.y * params_stride,
sh_params, tid,
min(params_per_block,
params_rows - params_per_block * blockIdx.y) * params_stride);
__syncthreads();
// allocated enough shared memory so that this will not walk out of bounds
// no matter what, though some of the results will be garbage
sh_result[tid] = %(name)s(sh_data + rel_data * data_stride,
sh_params + rel_param * params_stride,
data_cols);
__syncthreads();
unsigned int result_idx = data_rows * param_num + obs_num;
// unsigned int result_idx = obs_num * data_cols + param_num
// g_output is column-major, so this will then coalesce
if (obs_num < data_rows && param_num < params_rows) {
g_output[result_idx] = sh_result[tid];
}
}
// foo
|
9ae0fc24058332f918eac140e8ce60733e5ab422.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlarf.cu, normal z -> d, Sun Nov 20 20:20:28 2016
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
__global__
void magma_dlarf_kernel(
int m, const double *dv, const double *dtau,
double *dc, int lddc )
{
if ( !MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
dc = dc + blockIdx.x * lddc;
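// the launch grid is (n,1,1), so each thread block updates one column of C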
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform w := v**H * C */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_D_MUL( MAGMA_D_CONJ( dv[j] ), dc[j] );
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
tmp = - MAGMA_D_CONJ(*dtau) * sum[0];
for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE )
dc[j] += tmp * dv[j];
if (tx == 0) dc[0] += tmp;
}
}
/******************************************************************************/
__global__
void magma_dlarf_smkernel(
int m, int n, double *dv, double *dtau,
double *dc, int lddc )
{
if ( ! MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k += BLOCK_SIZEy ) {
dc = dc + k * lddc;
__shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ) {
if (j == 0)
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
else
lsum += MAGMA_D_MUL( MAGMA_D_CONJ( dv[j] ), dc[j] );
}
sum[i][col] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CONJ(*dtau) * sum[0][col];
for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) {
if (j == 0)
dc[j] += z__1;
else
dc[j] += z__1 * dv[j];
}
}
}
}
/******************************************************************************/
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_dlarf_sm(
magma_int_t m, magma_int_t n,
double *dv, double *dtau,
double *dc, magma_int_t lddc,
magma_queue_t queue )
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
hipLaunchKernelGGL(( magma_dlarf_smkernel)
, dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, dv, dtau, dc, lddc );
}
/***************************************************************************//**
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
*******************************************************************************/
extern "C" magma_int_t
magma_dlarf_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dv,
magmaDouble_const_ptr dtau,
magmaDouble_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n > 0 ) {
hipLaunchKernelGGL(( magma_dlarf_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, dv, dtau, dC, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_dlarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
| 9ae0fc24058332f918eac140e8ce60733e5ab422.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlarf.cu, normal z -> d, Sun Nov 20 20:20:28 2016
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
__global__
void magma_dlarf_kernel(
int m, const double *dv, const double *dtau,
double *dc, int lddc )
{
if ( !MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
dc = dc + blockIdx.x * lddc;
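// the launch grid is (n,1,1), so each thread block updates one column of C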
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform w := v**H * C */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_D_MUL( MAGMA_D_CONJ( dv[j] ), dc[j] );
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
tmp = - MAGMA_D_CONJ(*dtau) * sum[0];
for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE )
dc[j] += tmp * dv[j];
if (tx == 0) dc[0] += tmp;
}
}
/******************************************************************************/
__global__
void magma_dlarf_smkernel(
int m, int n, double *dv, double *dtau,
double *dc, int lddc )
{
if ( ! MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k += BLOCK_SIZEy ) {
dc = dc + k * lddc;
__shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ) {
if (j == 0)
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
else
lsum += MAGMA_D_MUL( MAGMA_D_CONJ( dv[j] ), dc[j] );
}
sum[i][col] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CONJ(*dtau) * sum[0][col];
for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) {
if (j == 0)
dc[j] += z__1;
else
dc[j] += z__1 * dv[j];
}
}
}
}
/******************************************************************************/
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_dlarf_sm(
magma_int_t m, magma_int_t n,
double *dv, double *dtau,
double *dc, magma_int_t lddc,
magma_queue_t queue )
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
magma_dlarf_smkernel
<<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, dv, dtau, dc, lddc );
}
/***************************************************************************//**
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
*******************************************************************************/
extern "C" magma_int_t
magma_dlarf_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dv,
magmaDouble_const_ptr dtau,
magmaDouble_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n > 0 ) {
magma_dlarf_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, dv, dtau, dC, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_dlarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
|
738749d4e980cf1be6763f451f03fd020e717aeb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/batch_sparse_to_dense_op.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
void array_prefix_sum_inclusive(
const int64_t* dev_array,
const int num_items,
Tensor& prefix_buffer,
Tensor& prefix_sum,
CUDAContext& context) {
// Retrieve buffer size
size_t temp_storage_bytes = 0;
prefix_sum.Resize(num_items);
hipcub::DeviceScan::InclusiveSum(
nullptr,
temp_storage_bytes,
dev_array,
prefix_sum.mutable_data<int64_t>(),
num_items,
context.cuda_stream());
// Allocate temporary storage
auto buffer_size = (temp_storage_bytes + sizeof(int64_t)) / sizeof(int64_t);
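// round the byte count up to a whole number of int64_t elements so the
// prefix_buffer tensor can serve as hipcub's temporary storage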
prefix_buffer.Resize(buffer_size);
void* dev_temp_storage =
static_cast<void*>(prefix_buffer.mutable_data<int64_t>());
// Inclusive sum
hipcub::DeviceScan::InclusiveSum(
dev_temp_storage,
temp_storage_bytes,
dev_array,
prefix_sum.mutable_data<int64_t>(),
num_items,
context.cuda_stream());
}
__global__ void FillInDenseValuesKernel(
const int64_t batch_size,
const int64_t dense_last_dim,
const int64_t* indices_data,
const float* values_data,
const int64_t* L_cum_sum_data,
float* output_data) {
CUDA_1D_KERNEL_LOOP(idx, batch_size) {
int offset_start = idx == 0 ? 0 : L_cum_sum_data[idx - 1];
int offset_end = L_cum_sum_data[idx];
for (int q = offset_start; q < offset_end; q++) {
int indice = indices_data[q];
float val = values_data[q];
output_data[idx * dense_last_dim + indice] = val;
}
}
}
__global__ void FillInSparseValuesKernel(
const int64_t batch_size,
const int64_t dense_last_dim,
const int64_t* indices_data,
const float* dense_data,
const int64_t* L_cum_sum_data,
float* output_data) {
CUDA_1D_KERNEL_LOOP(idx, batch_size) {
int offset_start = idx == 0 ? 0 : L_cum_sum_data[idx - 1];
int offset_end = L_cum_sum_data[idx];
for (int q = offset_start; q < offset_end; q++) {
int indice = indices_data[q];
output_data[q] = dense_data[idx * dense_last_dim + indice];
}
}
}
} // namespace
template <>
void BatchSparseToDenseOp<float, CUDAContext>::FillInDenseValues(
const int64_t batch_size,
const int64_t indice_lengths,
const int64_t* lengths_data,
const int64_t* indices_data,
const float* values_data,
float* output_data,
CUDAContext* context) {
// calculate the prefix sum of the length array
array_prefix_sum_inclusive(
lengths_data, batch_size, len_prefix_tmp_, len_prefix_sum_, context_);
// launch the gpu kernel to fill in dense values
const int64_t min_size = 1;
hipLaunchKernelGGL(( FillInDenseValuesKernel),
dim3(CAFFE_GET_BLOCKS(::max(batch_size, min_size))),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
batch_size,
dense_last_dim_,
indices_data,
values_data,
len_prefix_sum_.data<int64_t>(),
output_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <>
void BatchDenseToSparseOp<float, CUDAContext>::FillInSparseValues(
const int64_t batch_size,
const int64_t indice_lengths,
const int64_t* lengths_data,
const int64_t* indices_data,
const float* dense_data,
float* output_data,
CUDAContext* context) {
// calculate the prefix sum of the length array
array_prefix_sum_inclusive(
lengths_data, batch_size, len_prefix_tmp_, len_prefix_sum_, context_);
// launch the gpu kernel to fill in sparse values
const int64_t min_size = 1;
hipLaunchKernelGGL(( FillInSparseValuesKernel),
dim3(CAFFE_GET_BLOCKS(::max(batch_size, min_size))),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
batch_size,
dense_last_dim_,
indices_data,
dense_data,
len_prefix_sum_.data<int64_t>(),
output_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(
BatchSparseToDense,
BatchSparseToDenseOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
BatchDenseToSparse,
BatchDenseToSparseOp<float, CUDAContext>);
} // namespace caffe2
| 738749d4e980cf1be6763f451f03fd020e717aeb.cu | #include "caffe2/operators/batch_sparse_to_dense_op.h"
#include <cub/device/device_scan.cuh>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
void array_prefix_sum_inclusive(
const int64_t* dev_array,
const int num_items,
Tensor& prefix_buffer,
Tensor& prefix_sum,
CUDAContext& context) {
// Retrieve buffer size
size_t temp_storage_bytes = 0;
prefix_sum.Resize(num_items);
cub::DeviceScan::InclusiveSum(
nullptr,
temp_storage_bytes,
dev_array,
prefix_sum.mutable_data<int64_t>(),
num_items,
context.cuda_stream());
// Allocate temporary storage
auto buffer_size = (temp_storage_bytes + sizeof(int64_t)) / sizeof(int64_t);
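// round the byte count up to a whole number of int64_t elements so the
// prefix_buffer tensor can serve as cub's temporary storage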
prefix_buffer.Resize(buffer_size);
void* dev_temp_storage =
static_cast<void*>(prefix_buffer.mutable_data<int64_t>());
// Inclusive sum
cub::DeviceScan::InclusiveSum(
dev_temp_storage,
temp_storage_bytes,
dev_array,
prefix_sum.mutable_data<int64_t>(),
num_items,
context.cuda_stream());
}
__global__ void FillInDenseValuesKernel(
const int64_t batch_size,
const int64_t dense_last_dim,
const int64_t* indices_data,
const float* values_data,
const int64_t* L_cum_sum_data,
float* output_data) {
CUDA_1D_KERNEL_LOOP(idx, batch_size) {
int offset_start = idx == 0 ? 0 : L_cum_sum_data[idx - 1];
int offset_end = L_cum_sum_data[idx];
for (int q = offset_start; q < offset_end; q++) {
int indice = indices_data[q];
float val = values_data[q];
output_data[idx * dense_last_dim + indice] = val;
}
}
}
__global__ void FillInSparseValuesKernel(
const int64_t batch_size,
const int64_t dense_last_dim,
const int64_t* indices_data,
const float* dense_data,
const int64_t* L_cum_sum_data,
float* output_data) {
CUDA_1D_KERNEL_LOOP(idx, batch_size) {
int offset_start = idx == 0 ? 0 : L_cum_sum_data[idx - 1];
int offset_end = L_cum_sum_data[idx];
for (int q = offset_start; q < offset_end; q++) {
int indice = indices_data[q];
output_data[q] = dense_data[idx * dense_last_dim + indice];
}
}
}
} // namespace
template <>
void BatchSparseToDenseOp<float, CUDAContext>::FillInDenseValues(
const int64_t batch_size,
const int64_t indice_lengths,
const int64_t* lengths_data,
const int64_t* indices_data,
const float* values_data,
float* output_data,
CUDAContext* context) {
// calculate the prefix sum of the length array
array_prefix_sum_inclusive(
lengths_data, batch_size, len_prefix_tmp_, len_prefix_sum_, context_);
// launch the gpu kernel to fill in dense values
const int64_t min_size = 1;
FillInDenseValuesKernel<<<
CAFFE_GET_BLOCKS(std::max(batch_size, min_size)),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
batch_size,
dense_last_dim_,
indices_data,
values_data,
len_prefix_sum_.data<int64_t>(),
output_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <>
void BatchDenseToSparseOp<float, CUDAContext>::FillInSparseValues(
const int64_t batch_size,
const int64_t indice_lengths,
const int64_t* lengths_data,
const int64_t* indices_data,
const float* dense_data,
float* output_data,
CUDAContext* context) {
// calculate the prefix sum of the length array
array_prefix_sum_inclusive(
lengths_data, batch_size, len_prefix_tmp_, len_prefix_sum_, context_);
// launch the gpu kernel to fill in sparse values
const int64_t min_size = 1;
FillInSparseValuesKernel<<<
CAFFE_GET_BLOCKS(std::max(batch_size, min_size)),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
batch_size,
dense_last_dim_,
indices_data,
dense_data,
len_prefix_sum_.data<int64_t>(),
output_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(
BatchSparseToDense,
BatchSparseToDenseOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
BatchDenseToSparse,
BatchDenseToSparseOp<float, CUDAContext>);
} // namespace caffe2
|
90e8cf7882a49393b8ede3f91d7a4b73abe682be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "brick-cuda.h"
#include "head.h"
#include "headcu.h"
#define out(i, j, k) out_arr[k][j][i]
#define in(i, j, k) in_arr[k][j][i]
__global__ void
arr_kernel(bElem *in_ptr, bElem *out_ptr) {
auto in_arr = (bElem (*)[STRIDE][STRIDE]) in_ptr;
auto out_arr = (bElem (*)[STRIDE][STRIDE]) out_ptr;
#include "arrcusched.h"
{
#include "kernel.h"
}
}
#undef out
#undef in
__global__ void
brick_kernel(unsigned (*grid)[STRIDE/TILEJ][STRIDE/TILEI], Brick3D in, Brick3D out) {
#include "bricusched.h"
brick("kernel.py", BVEC, (TILEK, TILEJ, TILEI), (BFOLD), b);
}
int main() {
// allocations
auto in_arr = randomArray({STRIDE, STRIDE, STRIDE});
bElem *in_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, in_dev, in_arr);
auto out_arr = zeroArray({STRIDE, STRIDE, STRIDE});
bElem *out_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, out_dev, out_arr);
{
auto compute = [&]() -> void {
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(_TILEI, _TILEJ, _TILEK);
hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread) , 0, 0, in_dev, out_dev);
};
#ifndef TYPE
#include "cutiming.h"
#else
compute();
#endif
copyFromDevice({STRIDE, STRIDE, STRIDE}, out_arr, out_dev);
}
#if TYPE == 1
{
unsigned *grid_ptr;
unsigned bSize = TILEK * TILEJ * TILEI;
auto bInfo = init_grid<3>(grid_ptr, {STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI});
unsigned *grid_dev;
copyToDevice({STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr);
auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2);
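// each brick chunk holds the input and output fields back to back (hence bSize * 2);
// in_bri starts at offset 0 and out_bri at offset bSize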
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize);
BrickInfo<3> *bInfo_dev;
auto _bInfo_dev = movBrickInfo(bInfo, hipMemcpyHostToDevice);
{
unsigned size = sizeof(BrickInfo<3>);
hipMalloc(&bInfo_dev, size);
hipMemcpy(bInfo_dev, &_bInfo_dev, size, hipMemcpyHostToDevice);
}
copyBrick<3>({STRIDE, STRIDE, STRIDE}, in_arr, grid_ptr, in_bri);
BrickStorage *bStorage_dev;
BrickStorage _bStorage_dev = movBrickStorage(bStorage, hipMemcpyHostToDevice);
{
unsigned size = sizeof(BrickStorage);
hipMalloc(&bStorage_dev, size);
hipMemcpy(bStorage_dev, &_bStorage_dev, size, hipMemcpyHostToDevice);
}
auto compute = [&]() -> void {
Brick3D bIn(bInfo_dev, &_bStorage_dev, 0);
Brick3D bOut(bInfo_dev, &_bStorage_dev, bSize);
bIn.bStorage = bStorage_dev;
bOut.bStorage = bStorage_dev;
auto grid = (unsigned (*)[STRIDE/TILEJ][STRIDE/TILEI]) grid_dev;
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(32);
hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread) , 0, 0, grid, bIn, bOut);
};
#include "cutiming.h"
hipDeviceSynchronize();
hipMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), hipMemcpyDeviceToHost);
if (!compareBrick<3>({STRIDE, STRIDE, STRIDE}, out_arr, grid_ptr, out_bri))
return 1;
}
#endif
return 0;
}
| 90e8cf7882a49393b8ede3f91d7a4b73abe682be.cu | #include "brick-cuda.h"
#include "head.h"
#include "headcu.h"
#define out(i, j, k) out_arr[k][j][i]
#define in(i, j, k) in_arr[k][j][i]
__global__ void
arr_kernel(bElem *in_ptr, bElem *out_ptr) {
auto in_arr = (bElem (*)[STRIDE][STRIDE]) in_ptr;
auto out_arr = (bElem (*)[STRIDE][STRIDE]) out_ptr;
#include "arrcusched.h"
{
#include "kernel.h"
}
}
#undef out
#undef in
__global__ void
brick_kernel(unsigned (*grid)[STRIDE/TILEJ][STRIDE/TILEI], Brick3D in, Brick3D out) {
#include "bricusched.h"
brick("kernel.py", BVEC, (TILEK, TILEJ, TILEI), (BFOLD), b);
}
int main() {
// allocations
auto in_arr = randomArray({STRIDE, STRIDE, STRIDE});
bElem *in_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, in_dev, in_arr);
auto out_arr = zeroArray({STRIDE, STRIDE, STRIDE});
bElem *out_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, out_dev, out_arr);
{
auto compute = [&]() -> void {
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(_TILEI, _TILEJ, _TILEK);
arr_kernel<<< block, thread >>>(in_dev, out_dev);
};
#ifndef TYPE
#include "cutiming.h"
#else
compute();
#endif
copyFromDevice({STRIDE, STRIDE, STRIDE}, out_arr, out_dev);
}
#if TYPE == 1
{
unsigned *grid_ptr;
unsigned bSize = TILEK * TILEJ * TILEI;
auto bInfo = init_grid<3>(grid_ptr, {STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI});
unsigned *grid_dev;
copyToDevice({STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr);
auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2);
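// each brick chunk holds the input and output fields back to back (hence bSize * 2);
// in_bri starts at offset 0 and out_bri at offset bSize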
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize);
BrickInfo<3> *bInfo_dev;
auto _bInfo_dev = movBrickInfo(bInfo, cudaMemcpyHostToDevice);
{
unsigned size = sizeof(BrickInfo<3>);
cudaMalloc(&bInfo_dev, size);
cudaMemcpy(bInfo_dev, &_bInfo_dev, size, cudaMemcpyHostToDevice);
}
copyBrick<3>({STRIDE, STRIDE, STRIDE}, in_arr, grid_ptr, in_bri);
BrickStorage *bStorage_dev;
BrickStorage _bStorage_dev = movBrickStorage(bStorage, cudaMemcpyHostToDevice);
{
unsigned size = sizeof(BrickStorage);
cudaMalloc(&bStorage_dev, size);
cudaMemcpy(bStorage_dev, &_bStorage_dev, size, cudaMemcpyHostToDevice);
}
auto compute = [&]() -> void {
Brick3D bIn(bInfo_dev, &_bStorage_dev, 0);
Brick3D bOut(bInfo_dev, &_bStorage_dev, bSize);
bIn.bStorage = bStorage_dev;
bOut.bStorage = bStorage_dev;
auto grid = (unsigned (*)[STRIDE/TILEJ][STRIDE/TILEI]) grid_dev;
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(32);
brick_kernel<<< block, thread >>>(grid, bIn, bOut);
};
#include "cutiming.h"
cudaDeviceSynchronize();
cudaMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), cudaMemcpyDeviceToHost);
if (!compareBrick<3>({STRIDE, STRIDE, STRIDE}, out_arr, grid_ptr, out_bri))
return 1;
}
#endif
return 0;
}
|
cfebb994fe290b18051632a5796d3460d70de7b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#include <hip/hip_runtime_api.h>
/******************************************************************************
Displays two grey scale images. On the left is an image that has come from an
image processing pipeline, just after colour thresholding. On the right is
the result of applying an edge detection convolution operator to the left
image. This program performs that convolution.
Things to note:
- A single unsigned char stores a pixel intensity value. 0 is black, 256 is
white.
- The colour mode used is GL_LUMINANCE. This uses a single number to
represent a pixel's intensity. In this case we want 256 shades of grey,
which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as
the pixel data type.
To compile adapt the code below wo match your filenames:
nvcc -o ip_coursework_014_CUDA 'ip_coursework_014 .cu' -lglut -lGL -lm
To run the program:
./ip_coursework_014_CUDA
Dr Kevan Buckley, University of Wolverhampton, 2018
******************************************************************************/
#define width 100
#define height 72
unsigned char data[width * height];
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,255,0,0,0,0,0,255,255,255,0,255,255,0,0,255,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,255,255,255,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,255,0,0,0,0,0,255,255,0,255,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,255,255,255,255,0,255,255,255,255,255,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,
255,0,0,0,255,255,255,0,0,0,0,255,0,0,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,255,255,0,0,0,0,0,255,0,0,0,0,0,0,0,0,
255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,255,0,0,0,0,0,0,255,0,0,0,0,0,255,
0,0,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,255,255,255,255,255,0,0,255,255,0,
0,0,0,0,255,0,0,0,0,0,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
255,255,255,0,255,255,0,0,0,0,255,0,0,255,255,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,0,
255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,0,0,
0,255,255,255,0,0,0,0,255,255,0,0,0,0,0,0,0,255,255,
0,255,255,0,0,0,255,0,0,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,255,
255,0,0,255,255,0,0,0,0,0,255,255,0,0,255,0,0,0,255,
0,0,255,0,0,0,255,255,255,255,0,0,0,0,0,0,255,255,255,
255,0,0,0,0,0,0,255,0,0,0,0,0,255,0,0,0,0,0,
0,0,0,255,0,0,255,0,0,0,255,255,0,0,0,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,
0,255,255,0,0,255,0,0,255,255,0,0,255,0,0,255,0,0,0,
0,0,0,0,0,0,0,255,0,0,255,255,255,255,0,0,255,0,0,
0,0,255,255,255,255,0,0,255,255,0,0,0,0,0,0,0,0,0,
0,0,255,0,0,0,0,0,255,0,0,255,0,0,255,255,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,0,0,255,0,
0,255,0,0,0,0,0,0,0,0,0,0,255,0,0,255,255,255,255,
0,0,0,0,0,0,0,255,255,255,0,0,0,255,255,255,255,255,0,
0,0,0,0,0,0,0,255,0,0,0,0,0,255,0,0,255,0,0,
255,255,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,
0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,
0,255,255,255,255,255,0,0,0,255,255,255,255,255,255,0,0,0,255,
255,255,255,255,0,0,0,0,0,255,0,0,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,0,0,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,255,0,
0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,255,255,255,255,
255,0,0,0,255,255,0,0,255,255,0,0,0,0,255,0,0,255,0,
0,255,0,0,0,0,255,0,0,0,0,0,0,255,255,0,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
0,0,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,0,0,255,255,0,0,255,255,255,0,
0,0,255,255,0,255,255,255,0,255,255,0,0,255,0,0,0,0,0,
0,0,255,0,0,255,0,0,255,0,0,0,0,0,255,255,0,0,0,
255,0,0,0,0,255,0,0,255,0,0,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,
0,0,255,0,0,0,255,255,0,0,0,0,255,0,0,0,255,0,0,
0,255,255,0,0,0,0,255,255,0,0,255,0,0,0,255,0,0,0,
0,0,0,0,255,0,0,255,0,0,255,0,0,255,0,0,0,0,0,
255,0,0,0,255,0,0,0,0,0,255,0,0,255,0,0,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,0,0,0,255,0,0,0,255,0,0,255,0,0,0,0,
0,255,255,0,0,0,0,255,0,0,255,0,255,255,0,0,255,0,0,
255,255,0,0,0,0,0,0,0,255,0,0,255,0,0,255,0,0,255,
0,0,255,255,0,0,0,0,255,255,0,0,255,0,0,255,0,255,0,
0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,0,0,0,255,0,0,0,255,0,0,
255,255,0,0,0,0,255,255,0,0,0,0,255,0,0,0,0,0,255,
0,0,255,0,0,255,255,0,0,255,0,0,0,0,255,0,0,255,0,
0,255,0,0,255,0,0,255,0,0,255,0,0,255,255,0,0,255,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
255,0,255,0,0,0,0,0,255,0,0,255,0,0,255,0,0,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,
255,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,
255,0,0,0,0,255,0,0,0,0,0,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,
0,255,0,0,0,255,0,0,255,0,0,0,0,255,0,0,255,0,0,
255,255,0,255,0,0,0,0,255,255,0,0,0,0,0,0,0,0,255,
255,0,0,0,255,255,0,0,0,0,0,0,0,255,255,0,0,0,0,
255,255,0,0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,0,255,255,255,0,255,255,0,0,255,
255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
__global__ void detect_edges(unsigned char *in, unsigned char *out) {
unsigned int i = blockIdx.x;
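// launched with one single-thread block per pixel, so blockIdx.x is the flat pixel index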
int x, y; // the pixel of interest
int b, d, f, h; // the pixels adjacent to x,y used for the calculation
int r; // the result of calculate
y = i / width;
x = i - (width * y);
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
out[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
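// 5-point Laplacian stencil: centre weighted +4, the four direct neighbours -1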
r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1)
+ (in[h] * -1);
if (r > 0) { // if the result is positive this is an edge pixel
out[i] = 255;
} else {
out[i] = 0;
}
}
}
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
glFlush();
}
static void key_pressed(unsigned char key, int x, int y) {
switch(key){
case 27: // escape
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
signal(SIGINT, sigint_callback);
printf("image dimensions %dx%d\n", width, height);
unsigned char *device_data;
unsigned char *device_image;
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipMalloc((void**)&device_data, sizeof(unsigned char) * (width * height));
hipMalloc((void**)&device_image, sizeof(unsigned char) * (width * height) );
// pass the device pointers themselves (not their addresses) and let the
// host arrays decay to pointers
hipMemcpy(device_image, image, sizeof(unsigned char) * (width * height), hipMemcpyHostToDevice);
hipMemcpy(device_data, data, sizeof(unsigned char) * (width * height), hipMemcpyHostToDevice);
hipLaunchKernelGGL(detect_edges, dim3(7200), dim3(1), 0, 0, device_image, device_data);
hipDeviceSynchronize();
hipMemcpy(data, device_data, sizeof(unsigned char) * (width * height), hipMemcpyDeviceToHost);
hipMemcpy(image, device_image, sizeof(unsigned char) * (width * height), hipMemcpyDeviceToHost);
hipFree(device_image);
hipFree(device_data);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("6CS005 Image Processing Coursework");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
| cfebb994fe290b18051632a5796d3460d70de7b3.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#include <cuda_runtime_api.h>
/******************************************************************************
Displays two grey scale images. On the left is an image that has come from an
image processing pipeline, just after colour thresholding. On the right is
the result of applying an edge detection convolution operator to the left
image. This program performs that convolution.
Things to note:
- A single unsigned char stores a pixel intensity value. 0 is black, 255 is
white.
- The colour mode used is GL_LUMINANCE. This uses a single number to
represent a pixel's intensity. In this case we want 256 shades of grey,
which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as
the pixel data type.
To compile adapt the code below wo match your filenames:
nvcc -o ip_coursework_014_CUDA 'ip_coursework_014 .cu' -lglut -lGL -lm
To run the program:
./ip_coursework_014_CUDA
Dr Kevan Buckley, University of Wolverhampton, 2018
******************************************************************************/
#define width 100
#define height 72
unsigned char data[width * height];
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,255,0,0,0,0,0,255,255,255,0,255,255,0,0,255,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,255,255,255,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,255,0,0,0,0,0,255,255,0,255,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,255,255,255,255,0,255,255,255,255,255,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,
255,0,0,0,255,255,255,0,0,0,0,255,0,0,255,255,255,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,255,255,0,0,0,0,0,255,0,0,0,0,0,0,0,0,
255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,255,0,0,0,0,0,0,255,0,0,0,0,0,255,
0,0,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,255,255,255,255,255,0,0,255,255,0,
0,0,0,0,255,0,0,0,0,0,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
255,255,255,0,255,255,0,0,0,0,255,0,0,255,255,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,0,
255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,0,0,
0,255,255,255,0,0,0,0,255,255,0,0,0,0,0,0,0,255,255,
0,255,255,0,0,0,255,0,0,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,255,
255,0,0,255,255,0,0,0,0,0,255,255,0,0,255,0,0,0,255,
0,0,255,0,0,0,255,255,255,255,0,0,0,0,0,0,255,255,255,
255,0,0,0,0,0,0,255,0,0,0,0,0,255,0,0,0,0,0,
0,0,0,255,0,0,255,0,0,0,255,255,0,0,0,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,
0,255,255,0,0,255,0,0,255,255,0,0,255,0,0,255,0,0,0,
0,0,0,0,0,0,0,255,0,0,255,255,255,255,0,0,255,0,0,
0,0,255,255,255,255,0,0,255,255,0,0,0,0,0,0,0,0,0,
0,0,255,0,0,0,0,0,255,0,0,255,0,0,255,255,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,0,0,255,0,
0,255,0,0,0,0,0,0,0,0,0,0,255,0,0,255,255,255,255,
0,0,0,0,0,0,0,255,255,255,0,0,0,255,255,255,255,255,0,
0,0,0,0,0,0,0,255,0,0,0,0,0,255,0,0,255,0,0,
255,255,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,
0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,
0,255,255,255,255,255,0,0,0,255,255,255,255,255,255,0,0,0,255,
255,255,255,255,0,0,0,0,0,255,0,0,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,0,0,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,255,0,
0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,255,255,255,255,
255,0,0,0,255,255,0,0,255,255,0,0,0,0,255,0,0,255,0,
0,255,0,0,0,0,255,0,0,0,0,0,0,255,255,0,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
0,0,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,0,0,255,255,0,0,255,255,255,0,
0,0,255,255,0,255,255,255,0,255,255,0,0,255,0,0,0,0,0,
0,0,255,0,0,255,0,0,255,0,0,0,0,0,255,255,0,0,0,
255,0,0,0,0,255,0,0,255,0,0,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,
0,0,255,0,0,0,255,255,0,0,0,0,255,0,0,0,255,0,0,
0,255,255,0,0,0,0,255,255,0,0,255,0,0,0,255,0,0,0,
0,0,0,0,255,0,0,255,0,0,255,0,0,255,0,0,0,0,0,
255,0,0,0,255,0,0,0,0,0,255,0,0,255,0,0,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,0,0,0,255,0,0,0,255,0,0,255,0,0,0,0,
0,255,255,0,0,0,0,255,0,0,255,0,255,255,0,0,255,0,0,
255,255,0,0,0,0,0,0,0,255,0,0,255,0,0,255,0,0,255,
0,0,255,255,0,0,0,0,255,255,0,0,255,0,0,255,0,255,0,
0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,0,0,0,255,0,0,0,255,0,0,
255,255,0,0,0,0,255,255,0,0,0,0,255,0,0,0,0,0,255,
0,0,255,0,0,255,255,0,0,255,0,0,0,0,255,0,0,255,0,
0,255,0,0,255,0,0,255,0,0,255,0,0,255,255,0,0,255,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
255,0,255,0,0,0,0,0,255,0,0,255,0,0,255,0,0,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,
255,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,
255,0,0,0,0,255,0,0,0,0,0,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,
0,255,0,0,0,255,0,0,255,0,0,0,0,255,0,0,255,0,0,
255,255,0,255,0,0,0,0,255,255,0,0,0,0,0,0,0,0,255,
255,0,0,0,255,255,0,0,0,0,0,0,0,255,255,0,0,0,0,
255,255,0,0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,0,255,255,255,0,255,255,0,0,255,
255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
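/* Edge detection kernel. Each single-thread block handles one pixel. The
   operator is a Laplacian cross: the centre pixel weighted +4 and its four
   orthogonal neighbours weighted -1; a positive response marks an edge.
   Border pixels are simply set to black. */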
__global__ void detect_edges(unsigned char *in, unsigned char *out) {
unsigned int i = blockIdx.x ;
int x, y; // the pixel of interest
int b, d, f, h; // the pixels adjacent to x,y used for the calculation
int r; // the result of calculate
y = i / width;
x = i - (width * y);
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
out[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1)
+ (in[h] * -1);
if (r > 0) { // if the result is positive this is an edge pixel
out[i] = 255;
} else {
out[i] = 0;
}
}
}
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
glFlush();
}
static void key_pressed(unsigned char key, int x, int y) {
switch(key){
case 27: // escape
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
signal(SIGINT, sigint_callback);
printf("image dimensions %dx%d\n", width, height);
unsigned char *device_data;
unsigned char *device_image;
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
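// allocate device buffers, copy the input image (and the zero-initialised
// global output buffer) to the device, then launch one single-thread block
// per pixel (width * height = 7200 blocks)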
cudaMalloc((void**)&device_data, sizeof(unsigned char) * (width * height));
cudaMalloc((void**)&device_image, sizeof(unsigned char) * (width * height));
cudaMemcpy(device_image, image, sizeof(unsigned char) * (width * height), cudaMemcpyHostToDevice);
cudaMemcpy(device_data, data, sizeof(unsigned char) * (width * height), cudaMemcpyHostToDevice);
detect_edges <<<7200, 1>>>(device_image, device_data);
cudaDeviceSynchronize();
cudaMemcpy(data, device_data, sizeof(unsigned char) * (width * height), cudaMemcpyDeviceToHost);
cudaMemcpy(image, device_image, sizeof(unsigned char) * (width * height), cudaMemcpyDeviceToHost);
cudaFree(device_image);
cudaFree(device_data);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("6CS005 Image Progessing Courework");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
|
ece8853a413121bba1fa56e147cadf0e6760ca66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define PIXELSIZE 4
double* ClusterCore;
unsigned int* ClusterPixelCount;
double* ClusterColorSum;
__constant__ double DevClusterCore[4*32];
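/* Assigns every pixel to the nearest cluster core, using squared Euclidean
   distance over the RGB channels against the cores held in constant memory.
   The winning cluster index is written into the pixel's fourth byte, and
   *LastIter is cleared whenever any pixel changes cluster so the host keeps
   iterating. The grid-stride loop lets any launch geometry cover the image. */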
__global__ void PixelToCluster(unsigned char* Image, int ClusterCount, int Width, int Height, int* LastIter)
{
int ElementsInStr=PIXELSIZE*Width;
int Distance=0;
int SelectedCluster=0;
int a,b,c;
int i,j;
for(int offset=4*(blockIdx.x*blockDim.x+threadIdx.x); offset<Height*ElementsInStr; offset+=4*(gridDim.x*blockDim.x))
{
i=offset/ElementsInStr;
j=offset%ElementsInStr;
a=(DevClusterCore[0]-Image[i*ElementsInStr+j])*(DevClusterCore[0]-Image[i*ElementsInStr+j]);
b=(DevClusterCore[1]-Image[i*ElementsInStr+j+1])*(DevClusterCore[1]-Image[i*ElementsInStr+j+1]);
c=(DevClusterCore[2]-Image[i*ElementsInStr+j+2])*(DevClusterCore[2]-Image[i*ElementsInStr+j+2]);
Distance=a+b+c;
SelectedCluster=0;
for(int k=1; k<ClusterCount; k++)
{
a=(DevClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j])*(DevClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j]);
b=(DevClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1])*(DevClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1]);
c=(DevClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2])*(DevClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2]);
if((a+b+c)<Distance)
{
Distance=a+b+c;
SelectedCluster=k;
}
}
if(Image[i*ElementsInStr+j+3]!=SelectedCluster) (*LastIter)=0;
Image[i*ElementsInStr+j+3]=SelectedCluster;
}
}
__host__ void ClusterDistribution(unsigned char* Image, int ClusterCount, int Width, int Height, int* LastIter)
{
int ElementsInStr=PIXELSIZE*Width;
int Distance=0;
int SelectedCluster=0;
int a,b,c;
*LastIter=1;
for(int i=0; i<Height; i++)
{
for(int j=0; j<ElementsInStr; j+=PIXELSIZE)
{
a=(ClusterCore[0]-Image[i*ElementsInStr+j])*(ClusterCore[0]-Image[i*ElementsInStr+j]);
b=(ClusterCore[1]-Image[i*ElementsInStr+j+1])*(ClusterCore[1]-Image[i*ElementsInStr+j+1]);
c=(ClusterCore[2]-Image[i*ElementsInStr+j+2])*(ClusterCore[2]-Image[i*ElementsInStr+j+2]);
Distance=a+b+c;
SelectedCluster=0;
for(int k=1; k<ClusterCount; k++)
{
a=(ClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j])*(ClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j]);
b=(ClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1])*(ClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1]);
c=(ClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2])*(ClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2]);
if((a+b+c)<Distance)
{
Distance=a+b+c;
SelectedCluster=k;
}
}
if(Image[i*ElementsInStr+j+3]!=SelectedCluster) *LastIter=0;
Image[i*ElementsInStr+j+3]=SelectedCluster;
}
}
}
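/* k-means update step: recomputes each cluster core as the mean RGB colour
   of the pixels currently assigned to it. */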
__host__ void ClusterOffset(unsigned char* Image, int ClusterCount, int Width, int Height)
{
for(int i=0; i<ClusterCount; i++)
ClusterPixelCount[i]=0;
for(int i=0; i<4*ClusterCount; i++)
ClusterColorSum[i]=0;
int ElementsInStr=PIXELSIZE*Width;
int ClusterNum=0;
for(int i=0; i<Height; i++)
{
for(int j=0; j<ElementsInStr; j+=PIXELSIZE)
{
ClusterNum=Image[i*ElementsInStr+j+3];
ClusterPixelCount[ClusterNum]++;
ClusterColorSum[ClusterNum*PIXELSIZE]+=Image[i*ElementsInStr+j];
ClusterColorSum[ClusterNum*PIXELSIZE+1]+=Image[i*ElementsInStr+j+1];
ClusterColorSum[ClusterNum*PIXELSIZE+2]+=Image[i*ElementsInStr+j+2];
}
}
for(int i=0; i<ClusterCount; i++)
{
ClusterCore[i*PIXELSIZE]=ClusterColorSum[i*PIXELSIZE]/ClusterPixelCount[i];
ClusterCore[i*PIXELSIZE+1]=ClusterColorSum[i*PIXELSIZE+1]/ClusterPixelCount[i];
ClusterCore[i*PIXELSIZE+2]=ClusterColorSum[i*PIXELSIZE+2]/ClusterPixelCount[i];
}
}
int main()
{
char InPath[256];
char OutPath[256];
scanf("%s", InPath);
FILE* InPut = fopen(InPath, "rb");
if (InPut == NULL)
{
fprintf(stderr, "Cannot open in.data");
exit(1);
}
scanf("%s", OutPath);
FILE* OutPut = fopen(OutPath, "wb");
if (OutPut == NULL)
{
fprintf(stderr, "Cannot create out.data");
exit(1);
}
int ClusterNumber;
scanf("%d", &ClusterNumber);
int* Xcoords = (int*)malloc(ClusterNumber*sizeof(int));
int* Ycoords = (int*)malloc(ClusterNumber*sizeof(int));
for(int i=0; i<ClusterNumber; i++)
{
scanf("%d", &Ycoords[i]);
scanf("%d", &Xcoords[i]);
}
ClusterCore = (double*)malloc(4*ClusterNumber*sizeof(double));
ClusterPixelCount = (unsigned int*)malloc(ClusterNumber*sizeof(unsigned int));
ClusterColorSum = (double*)malloc(4*ClusterNumber*sizeof(double));
int Width;
int Height;
fread(&Width, sizeof(int), 1, InPut);
fread(&Height, sizeof(int), 1, InPut);
unsigned char* Image = (unsigned char*)malloc(4*Width*Height*sizeof(unsigned char));
fread(Image, 4*Width*Height*sizeof(unsigned char), 1, InPut);
unsigned char* Dev_Image;
hipMalloc((void**)&Dev_Image, 4*Width*Height*sizeof(unsigned char));
hipMemcpy(Dev_Image, Image, 4*Width*Height*sizeof(unsigned char), hipMemcpyHostToDevice);
for(int i=0; i<ClusterNumber; i++)
{
ClusterCore[i*PIXELSIZE]=Image[4*Width*Xcoords[i]+PIXELSIZE*Ycoords[i]];
ClusterCore[i*PIXELSIZE+1]=Image[4*Width*Xcoords[i]+PIXELSIZE*Ycoords[i]+1];
ClusterCore[i*PIXELSIZE+2]=Image[4*Width*Xcoords[i]+PIXELSIZE*Ycoords[i]+2];
ClusterCore[i*PIXELSIZE+3]=0;
}
int* NotLastIter;
int* HostNotLastIter = (int*)malloc(sizeof(int));
*HostNotLastIter=1;
hipMalloc((void**)&NotLastIter, sizeof(int));
hipMemcpy(NotLastIter, HostNotLastIter, sizeof(int), hipMemcpyHostToDevice);
hipMemcpyToSymbol(DevClusterCore, ClusterCore, 4*ClusterNumber*sizeof(double));
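// k-means main loop: assign pixels on the GPU, then recompute the cores on
// the host and re-upload them, until an assignment pass changes nothing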
while(1)
{
hipLaunchKernelGGL(( PixelToCluster), dim3(128), dim3(512), 0, 0, Dev_Image, ClusterNumber, Width, Height, NotLastIter);
hipMemcpy(HostNotLastIter, NotLastIter, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(Image, Dev_Image, 4*Width*Height*sizeof(unsigned char), hipMemcpyDeviceToHost);
if((*HostNotLastIter)==1) break;
ClusterOffset(Image, ClusterNumber, Width, Height);
hipMemcpyToSymbol(DevClusterCore, ClusterCore, 4*ClusterNumber*sizeof(double));
*HostNotLastIter=1;
hipMemcpy(NotLastIter, HostNotLastIter, sizeof(int), hipMemcpyHostToDevice);
}
fwrite(&Width, sizeof(int), 1 ,OutPut);
fwrite(&Height, sizeof(int), 1, OutPut);
fwrite(Image, 4*Width*Height*sizeof(unsigned char),1, OutPut);
hipFree(NotLastIter);
hipFree(Dev_Image);
free(Image);
free(ClusterColorSum);
free(ClusterCore);
free(ClusterPixelCount);
return 0;
}
| ece8853a413121bba1fa56e147cadf0e6760ca66.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define PIXELSIZE 4
double* ClusterCore;
unsigned int* ClusterPixelCount;
double* ClusterColorSum;
__constant__ double DevClusterCore[4*32];
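/* Assigns every pixel to the nearest cluster core, using squared Euclidean
   distance over the RGB channels against the cores held in constant memory.
   The winning cluster index is written into the pixel's fourth byte, and
   *LastIter is cleared whenever any pixel changes cluster so the host keeps
   iterating. The grid-stride loop lets any launch geometry cover the image. */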
__global__ void PixelToCluster(unsigned char* Image, int ClusterCount, int Width, int Height, int* LastIter)
{
int ElementsInStr=PIXELSIZE*Width;
int Distance=0;
int SelectedCluster=0;
int a,b,c;
int i,j;
for(int offset=4*(blockIdx.x*blockDim.x+threadIdx.x); offset<Height*ElementsInStr; offset+=4*(gridDim.x*blockDim.x))
{
i=offset/ElementsInStr;
j=offset%ElementsInStr;
a=(DevClusterCore[0]-Image[i*ElementsInStr+j])*(DevClusterCore[0]-Image[i*ElementsInStr+j]);
b=(DevClusterCore[1]-Image[i*ElementsInStr+j+1])*(DevClusterCore[1]-Image[i*ElementsInStr+j+1]);
c=(DevClusterCore[2]-Image[i*ElementsInStr+j+2])*(DevClusterCore[2]-Image[i*ElementsInStr+j+2]);
Distance=a+b+c;
SelectedCluster=0;
for(int k=1; k<ClusterCount; k++)
{
a=(DevClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j])*(DevClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j]);
b=(DevClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1])*(DevClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1]);
c=(DevClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2])*(DevClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2]);
if((a+b+c)<Distance)
{
Distance=a+b+c;
SelectedCluster=k;
}
}
if(Image[i*ElementsInStr+j+3]!=SelectedCluster) (*LastIter)=0;
Image[i*ElementsInStr+j+3]=SelectedCluster;
}
}
__host__ void ClusterDistribution(unsigned char* Image, int ClusterCount, int Width, int Height, int* LastIter)
{
int ElementsInStr=PIXELSIZE*Width;
int Distance=0;
int SelectedCluster=0;
int a,b,c;
*LastIter=1;
for(int i=0; i<Height; i++)
{
for(int j=0; j<ElementsInStr; j+=PIXELSIZE)
{
a=(ClusterCore[0]-Image[i*ElementsInStr+j])*(ClusterCore[0]-Image[i*ElementsInStr+j]);
b=(ClusterCore[1]-Image[i*ElementsInStr+j+1])*(ClusterCore[1]-Image[i*ElementsInStr+j+1]);
c=(ClusterCore[2]-Image[i*ElementsInStr+j+2])*(ClusterCore[2]-Image[i*ElementsInStr+j+2]);
Distance=a+b+c;
SelectedCluster=0;
for(int k=1; k<ClusterCount; k++)
{
a=(ClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j])*(ClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j]);
b=(ClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1])*(ClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1]);
c=(ClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2])*(ClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2]);
if((a+b+c)<Distance)
{
Distance=a+b+c;
SelectedCluster=k;
}
}
if(Image[i*ElementsInStr+j+3]!=SelectedCluster) *LastIter=0;
Image[i*ElementsInStr+j+3]=SelectedCluster;
}
}
}
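/* k-means update step: recomputes each cluster core as the mean RGB colour
   of the pixels currently assigned to it. */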
__host__ void ClusterOffset(unsigned char* Image, int ClusterCount, int Width, int Height)
{
for(int i=0; i<ClusterCount; i++)
ClusterPixelCount[i]=0;
for(int i=0; i<4*ClusterCount; i++)
ClusterColorSum[i]=0;
int ElementsInStr=PIXELSIZE*Width;
int ClusterNum=0;
for(int i=0; i<Height; i++)
{
for(int j=0; j<ElementsInStr; j+=PIXELSIZE)
{
ClusterNum=Image[i*ElementsInStr+j+3];
ClusterPixelCount[ClusterNum]++;
ClusterColorSum[ClusterNum*PIXELSIZE]+=Image[i*ElementsInStr+j];
ClusterColorSum[ClusterNum*PIXELSIZE+1]+=Image[i*ElementsInStr+j+1];
ClusterColorSum[ClusterNum*PIXELSIZE+2]+=Image[i*ElementsInStr+j+2];
}
}
for(int i=0; i<ClusterCount; i++)
{
ClusterCore[i*PIXELSIZE]=ClusterColorSum[i*PIXELSIZE]/ClusterPixelCount[i];
ClusterCore[i*PIXELSIZE+1]=ClusterColorSum[i*PIXELSIZE+1]/ClusterPixelCount[i];
ClusterCore[i*PIXELSIZE+2]=ClusterColorSum[i*PIXELSIZE+2]/ClusterPixelCount[i];
}
}
int main()
{
char InPath[256];
char OutPath[256];
scanf("%s", InPath);
FILE* InPut = fopen(InPath, "rb");
if (InPut == NULL)
{
fprintf(stderr, "Cannot open in.data");
exit(1);
}
scanf("%s", OutPath);
FILE* OutPut = fopen(OutPath, "wb");
if (OutPut == NULL)
{
fprintf(stderr, "Cannot create out.data");
exit(1);
}
int ClusterNumber;
scanf("%d", &ClusterNumber);
int* Xcoords = (int*)malloc(ClusterNumber*sizeof(int));
int* Ycoords = (int*)malloc(ClusterNumber*sizeof(int));
for(int i=0; i<ClusterNumber; i++)
{
scanf("%d", &Ycoords[i]);
scanf("%d", &Xcoords[i]);
}
ClusterCore = (double*)malloc(4*ClusterNumber*sizeof(double));
ClusterPixelCount = (unsigned int*)malloc(ClusterNumber*sizeof(unsigned int));
ClusterColorSum = (double*)malloc(4*ClusterNumber*sizeof(double));
int Width;
int Height;
fread(&Width, sizeof(int), 1, InPut);
fread(&Height, sizeof(int), 1, InPut);
unsigned char* Image = (unsigned char*)malloc(4*Width*Height*sizeof(unsigned char));
fread(Image, 4*Width*Height*sizeof(unsigned char), 1, InPut);
unsigned char* Dev_Image;
cudaMalloc((void**)&Dev_Image, 4*Width*Height*sizeof(unsigned char));
cudaMemcpy(Dev_Image, Image, 4*Width*Height*sizeof(unsigned char), cudaMemcpyHostToDevice);
for(int i=0; i<ClusterNumber; i++)
{
ClusterCore[i*PIXELSIZE]=Image[4*Width*Xcoords[i]+PIXELSIZE*Ycoords[i]];
ClusterCore[i*PIXELSIZE+1]=Image[4*Width*Xcoords[i]+PIXELSIZE*Ycoords[i]+1];
ClusterCore[i*PIXELSIZE+2]=Image[4*Width*Xcoords[i]+PIXELSIZE*Ycoords[i]+2];
ClusterCore[i*PIXELSIZE+3]=0;
}
int* NotLastIter;
int* HostNotLastIter = (int*)malloc(sizeof(int));
*HostNotLastIter=1;
cudaMalloc((void**)&NotLastIter, sizeof(int));
cudaMemcpy(NotLastIter, HostNotLastIter, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(DevClusterCore, ClusterCore, 4*ClusterNumber*sizeof(double));
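// k-means main loop: assign pixels on the GPU, then recompute the cores on
// the host and re-upload them, until an assignment pass changes nothing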
while(1)
{
PixelToCluster<<<128, 512>>>(Dev_Image, ClusterNumber, Width, Height, NotLastIter);
cudaMemcpy(HostNotLastIter, NotLastIter, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(Image, Dev_Image, 4*Width*Height*sizeof(unsigned char), cudaMemcpyDeviceToHost);
if((*HostNotLastIter)==1) break;
ClusterOffset(Image, ClusterNumber, Width, Height);
cudaMemcpyToSymbol(DevClusterCore, ClusterCore, 4*ClusterNumber*sizeof(double));
*HostNotLastIter=1;
cudaMemcpy(NotLastIter, HostNotLastIter, sizeof(int), cudaMemcpyHostToDevice);
}
fwrite(&Width, sizeof(int), 1 ,OutPut);
fwrite(&Height, sizeof(int), 1, OutPut);
fwrite(Image, 4*Width*Height*sizeof(unsigned char),1, OutPut);
cudaFree(NotLastIter);
cudaFree(Dev_Image);
free(Image);
free(ClusterColorSum);
free(ClusterCore);
free(ClusterPixelCount);
return 0;
}
|
7a4da5ed29b914620e36ea67f46e798e0b93b43d.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip"
#else
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/core/EnableNamedTensor.h>
void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitand is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self &= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 & src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self |= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 | src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self ^= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 ^ src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#if !defined(THC_REAL_IS_BOOL)
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(result, src);
#endif
}
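/* Generates both the in-place and out-of-place variants of a unary
   pointwise tensor function NAME from the elementwise functor CFUNC,
   dispatching through THC_pointwiseApply1/THC_pointwiseApply2. */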
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(scalar_t* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
at::assert_no_internal_overlap(self_); \
if (self_ == src) { \
if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(hipGetLastError()); \
propagate_names_if_named_tensor_enabled(self_, src); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<scalar_t>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<scalar_t>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<scalar_t>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<scalar_t>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
scalar_t max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
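/* Cross product along `dimension`: each tensor is narrowed to its first
   slice on that dimension, and TensorCrossOp reaches the remaining two
   components through the original strides sx, sy and so. */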
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(self_, src);
#endif
}
#endif
namespace {
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
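// cadd/csub/cmul/cdiv re-wrap the raw THCTensor pointers as at::Tensor
// (bumping the refcount via retainTensorImpl) and delegate to the
// corresponding ATen out-of-place kernels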
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self <<= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 << src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self >>= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 >> src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#endif
#endif
| 7a4da5ed29b914620e36ea67f46e798e0b93b43d.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu"
#else
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/core/EnableNamedTensor.h>
void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitand is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self &= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 & src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self |= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 | src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self ^= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 ^ src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#if !defined(THC_REAL_IS_BOOL)
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(result, src);
#endif
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(scalar_t* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
at::assert_no_internal_overlap(self_); \
if (self_ == src) { \
if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(cudaGetLastError()); \
propagate_names_if_named_tensor_enabled(self_, src); \
}
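// Extra level of indirection so the NAME/CFUNC/REAL arguments are fully
// macro-expanded before token pasting inside IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_.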
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<scalar_t>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<scalar_t>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<scalar_t>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<scalar_t>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
scalar_t max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#ifdef BUILD_NAMEDTENSOR
at::namedinference::propagate_names(self_, src);
#endif
}
#endif
namespace {
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self <<= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 << src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self >>= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 >> src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#endif
#endif
|
e626cae6fae4da5bf2dd29d718e7e2fbb79df380.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <float.h>
#include "nccl.h"
#include "test_utilities.h"
#include <roctracer/roctx.h>
void showUsage(const char* bin) {
printf("\n"
"Usage: %s <type> <op> <n_min> <n_max> [delta] [gpus] [gpu0 [gpu1 [...]]]\n"
"Where:\n"
#ifdef CUDA_HAS_HALF
" type = [char|int|half|float|double|int64|uint64]\n"
#else
" type = [char|int|float|double|int64|uint64]\n"
#endif
" op = [sum|prod|max|min]\n"
" n_min > 0\n"
" n_max >= n_min\n"
" delta > 0\n\n", bin);
return;
}
int main(int argc, char* argv[]) {
int nvis = 0;
CUDACHECK(hipGetDeviceCount(&nvis));
if (nvis == 0) {
printf("No GPUs found\n");
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
ncclDataType_t type;
ncclRedOp_t op;
int n_min;
int n_max;
int delta;
int gpus;
int* list = nullptr;
if (argc < 5) {
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
type = strToType(argv[1]);
if (type == nccl_NUM_TYPES) {
printf("Invalid <type> '%s'\n", argv[1]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
op = strToOp(argv[2]);
if (op == nccl_NUM_OPS) {
printf("Invalid <op> '%s'\n", argv[2]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
n_min = strToPosInt(argv[3]);
if (n_min < 1) {
printf("Invalid <n_min> '%s'\n", argv[3]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
n_max = strToPosInt(argv[4]);
if (n_max < n_min) {
printf("Invalid <n_max> '%s'\n", argv[4]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
if (argc > 5) {
delta = strToPosInt(argv[5]);
if (delta < 1) {
printf("Invalid <delta> '%s'\n", argv[5]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
delta = (n_max == n_min) ? 1 : (n_max - n_min+9) / 10;
}
if (argc > 6) {
gpus = strToPosInt(argv[6]);
if (gpus < 1) {
printf("Invalid <gpus> '%s'\n", argv[6]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
gpus = nvis;
}
list = (int*)malloc(gpus*sizeof(int));
if (argc > 7 && argc != 7+gpus) {
printf("If given, GPU list must be fully specified.\n");
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
for(int g=0; g<gpus; ++g) {
if(argc > 7) {
list[g] = strToNonNeg(argv[7+g]);
if (list[g] < 0) {
printf("Invalid GPU%d '%s'\n", g, argv[7+g]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
} else if (list[g] >= nvis) {
printf("GPU%d (%d) exceeds visible devices (%d)\n", g, list[g], nvis);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
list[g] = g % nvis;
}
}
size_t word = wordSize(type);
size_t max_output = n_max * word;
size_t max_input = gpus * max_output;
void* refout;
CUDACHECK(hipHostMalloc(&refout, max_input)); // contains entire reduction
void **input, **output;
double** localError;
ncclComm_t* comm;
hipStream_t* stream;
input = (void**)malloc(gpus*sizeof(void*));
output = (void**)malloc(gpus*sizeof(void*));
localError = (double**)malloc(gpus*sizeof(double*));
comm = (ncclComm_t*)malloc(gpus*sizeof(ncclComm_t));
stream = (hipStream_t*)malloc(gpus*sizeof(hipStream_t));
for(int g=0; g<gpus; ++g) {
char busid[32] = {0};
CUDACHECK(hipDeviceGetPCIBusId(busid, 32, list[g]));
printf("# Rank %d using device %d [%s]\n", g, list[g], busid);
CUDACHECK(hipSetDevice(list[g]));
CUDACHECK(hipMalloc(&input[g], max_input));
CUDACHECK(hipMalloc(&output[g], max_output));
CUDACHECK(hipHostMalloc(&localError[g], sizeof(double)));
CUDACHECK(hipStreamCreate(&stream[g]));
makeRandom(input[g], n_max*gpus, type, 42+g);
if (g == 0)
CUDACHECK(hipMemcpy(refout, input[g], max_input, hipMemcpyDeviceToHost));
else
accVec(refout, input[g], n_max*gpus, type, op);
}
NCCLCHECK(ncclCommInitAll(comm, gpus, list));
printf(" BYTES ERROR MSEC ALGBW BUSBW\n");
for(int n=n_min; n<=n_max; n+=delta) {
size_t bytes = word * n;
for(int g=0; g<gpus; ++g) {
CUDACHECK(hipSetDevice(list[g]));
CUDACHECK(hipMemsetAsync(output[g], 0, bytes, stream[g]));
CUDACHECK(hipStreamSynchronize(stream[g]));
}
auto start = std::chrono::high_resolution_clock::now();
for(int g=0; g<gpus; ++g) {
CUDACHECK(hipSetDevice(list[g]));
NCCLCHECK(ncclReduceScatter(input[g], output[g], n, type, op, comm[g], stream[g]));
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(hipSetDevice(list[g]));
CUDACHECK(hipStreamSynchronize(stream[g]));
}
auto stop = std::chrono::high_resolution_clock::now();
double ms = std::chrono::duration_cast<std::chrono::duration<double>>
(stop - start).count() * 1000.0;
double max_error = 0.0;
for(int g=0; g<gpus; ++g) {
CUDACHECK(hipSetDevice(list[g]));
void* myRef = (void*)((char*)refout + g*bytes);
maxDiff(localError[g], output[g], myRef, n, type, stream[g]);
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(hipSetDevice(list[g]));
CUDACHECK(hipStreamSynchronize(stream[g]));
max_error = max(max_error, *localError[g]);
}
double mb = (double)bytes * 1.e-6;
double algbw = mb / ms;
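    // Reported bus bandwidth follows the nccl-tests convention for
    // reduce-scatter: algorithmic bandwidth scaled by (gpus - 1), the number
    // of peer contributions each link has to carry.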
double busbw = algbw * (double)(gpus - 1);
printf("%12lu %5.0le %10.3lf %6.2lf %6.2lf\n",
n*word, max_error, ms, algbw, busbw);
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(hipSetDevice(list[g]));
CUDACHECK(hipStreamDestroy(stream[g]));
ncclCommDestroy(comm[g]);
CUDACHECK(hipFree(input[g]));
CUDACHECK(hipFree(output[g]));
CUDACHECK(hipHostFree(localError[g]));
}
free(localError);
free(output);
free(input);
free(comm);
free(stream);
CUDACHECK(hipHostFree(refout));
exit(EXIT_SUCCESS);
}
| e626cae6fae4da5bf2dd29d718e7e2fbb79df380.cu | /*************************************************************************
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <float.h>
#include "nccl.h"
#include "test_utilities.h"
#include <nvToolsExt.h>
void showUsage(const char* bin) {
printf("\n"
"Usage: %s <type> <op> <n_min> <n_max> [delta] [gpus] [gpu0 [gpu1 [...]]]\n"
"Where:\n"
#ifdef CUDA_HAS_HALF
" type = [char|int|half|float|double|int64|uint64]\n"
#else
" type = [char|int|float|double|int64|uint64]\n"
#endif
" op = [sum|prod|max|min]\n"
" n_min > 0\n"
" n_max >= n_min\n"
" delta > 0\n\n", bin);
return;
}
int main(int argc, char* argv[]) {
int nvis = 0;
CUDACHECK(cudaGetDeviceCount(&nvis));
if (nvis == 0) {
printf("No GPUs found\n");
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
ncclDataType_t type;
ncclRedOp_t op;
int n_min;
int n_max;
int delta;
int gpus;
int* list = nullptr;
if (argc < 5) {
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
type = strToType(argv[1]);
if (type == nccl_NUM_TYPES) {
printf("Invalid <type> '%s'\n", argv[1]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
op = strToOp(argv[2]);
if (op == nccl_NUM_OPS) {
printf("Invalid <op> '%s'\n", argv[2]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
n_min = strToPosInt(argv[3]);
if (n_min < 1) {
printf("Invalid <n_min> '%s'\n", argv[3]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
n_max = strToPosInt(argv[4]);
if (n_max < n_min) {
printf("Invalid <n_max> '%s'\n", argv[4]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
if (argc > 5) {
delta = strToPosInt(argv[5]);
if (delta < 1) {
printf("Invalid <delta> '%s'\n", argv[5]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
delta = (n_max == n_min) ? 1 : (n_max - n_min+9) / 10;
}
if (argc > 6) {
gpus = strToPosInt(argv[6]);
if (gpus < 1) {
printf("Invalid <gpus> '%s'\n", argv[6]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
gpus = nvis;
}
list = (int*)malloc(gpus*sizeof(int));
if (argc > 7 && argc != 7+gpus) {
printf("If given, GPU list must be fully specified.\n");
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
for(int g=0; g<gpus; ++g) {
if(argc > 7) {
list[g] = strToNonNeg(argv[7+g]);
if (list[g] < 0) {
printf("Invalid GPU%d '%s'\n", g, argv[7+g]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
} else if (list[g] >= nvis) {
printf("GPU%d (%d) exceeds visible devices (%d)\n", g, list[g], nvis);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
list[g] = g % nvis;
}
}
size_t word = wordSize(type);
size_t max_output = n_max * word;
size_t max_input = gpus * max_output;
void* refout;
CUDACHECK(cudaMallocHost(&refout, max_input)); // contains entire reduction
void **input, **output;
double** localError;
ncclComm_t* comm;
cudaStream_t* stream;
input = (void**)malloc(gpus*sizeof(void*));
output = (void**)malloc(gpus*sizeof(void*));
localError = (double**)malloc(gpus*sizeof(double*));
comm = (ncclComm_t*)malloc(gpus*sizeof(ncclComm_t));
stream = (cudaStream_t*)malloc(gpus*sizeof(cudaStream_t));
for(int g=0; g<gpus; ++g) {
char busid[32] = {0};
CUDACHECK(cudaDeviceGetPCIBusId(busid, 32, list[g]));
printf("# Rank %d using device %d [%s]\n", g, list[g], busid);
CUDACHECK(cudaSetDevice(list[g]));
CUDACHECK(cudaMalloc(&input[g], max_input));
CUDACHECK(cudaMalloc(&output[g], max_output));
CUDACHECK(cudaMallocHost(&localError[g], sizeof(double)));
CUDACHECK(cudaStreamCreate(&stream[g]));
makeRandom(input[g], n_max*gpus, type, 42+g);
if (g == 0)
CUDACHECK(cudaMemcpy(refout, input[g], max_input, cudaMemcpyDeviceToHost));
else
accVec(refout, input[g], n_max*gpus, type, op);
}
NCCLCHECK(ncclCommInitAll(comm, gpus, list));
printf(" BYTES ERROR MSEC ALGBW BUSBW\n");
for(int n=n_min; n<=n_max; n+=delta) {
size_t bytes = word * n;
for(int g=0; g<gpus; ++g) {
CUDACHECK(cudaSetDevice(list[g]));
CUDACHECK(cudaMemsetAsync(output[g], 0, bytes, stream[g]));
CUDACHECK(cudaStreamSynchronize(stream[g]));
}
auto start = std::chrono::high_resolution_clock::now();
for(int g=0; g<gpus; ++g) {
CUDACHECK(cudaSetDevice(list[g]));
NCCLCHECK(ncclReduceScatter(input[g], output[g], n, type, op, comm[g], stream[g]));
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(cudaSetDevice(list[g]));
CUDACHECK(cudaStreamSynchronize(stream[g]));
}
auto stop = std::chrono::high_resolution_clock::now();
double ms = std::chrono::duration_cast<std::chrono::duration<double>>
(stop - start).count() * 1000.0;
double max_error = 0.0;
for(int g=0; g<gpus; ++g) {
CUDACHECK(cudaSetDevice(list[g]));
void* myRef = (void*)((char*)refout + g*bytes);
maxDiff(localError[g], output[g], myRef, n, type, stream[g]);
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(cudaSetDevice(list[g]));
CUDACHECK(cudaStreamSynchronize(stream[g]));
max_error = max(max_error, *localError[g]);
}
double mb = (double)bytes * 1.e-6;
double algbw = mb / ms;
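    // Reported bus bandwidth follows the nccl-tests convention for
    // reduce-scatter: algorithmic bandwidth scaled by (gpus - 1), the number
    // of peer contributions each link has to carry.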
double busbw = algbw * (double)(gpus - 1);
printf("%12lu %5.0le %10.3lf %6.2lf %6.2lf\n",
n*word, max_error, ms, algbw, busbw);
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(cudaSetDevice(list[g]));
CUDACHECK(cudaStreamDestroy(stream[g]));
ncclCommDestroy(comm[g]);
CUDACHECK(cudaFree(input[g]));
CUDACHECK(cudaFree(output[g]));
CUDACHECK(cudaFreeHost(localError[g]));
}
free(localError);
free(output);
free(input);
free(comm);
free(stream);
CUDACHECK(cudaFreeHost(refout));
exit(EXIT_SUCCESS);
}
|
2328ee5effb3242a9d87ba704e4bc726e74bdc07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <layers/sub_layer.hpp>
#include <algorithm>
#include <functional>
#include <utils.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
#define BLOCK_DIM_SIZE 32
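// Elementwise forward pass: output[tid] = inputs[0][tid] - inputs[1][tid].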
template <typename T>
__global__ void sub_kernel(T** inputs, T* output, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) output[tid] = inputs[0][tid] - inputs[1][tid];
}
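// Backward pass: the gradient w.r.t. the first input is +top_grad and the
// gradient w.r.t. the second input is -top_grad.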
template <typename T>
__global__ void sub_dgrad_kernel(const T* top_grad, T** dgrads, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
dgrads[0][tid] = top_grad[tid];
dgrads[1][tid] = 0.0 - top_grad[tid];
}
}
} // end of namespace
template <typename T>
SubLayer<T>::SubLayer(const Tensors2<T>& in_tensors, const Tensor2<T>& out_tensor,
const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
const std::shared_ptr<GPUResource>& gpu_resource)
: Layer(gpu_resource) {
try {
size_ = in_tensors[0].get_num_elements();
num_ = in_tensors.size();
// error input checking
auto dims = in_tensors[0].get_dimensions();
if (num_ != 2) {
CK_THROW_(Error_t::WrongInput, "SubLayer needs 2 input tensors");
}
for (size_t i = 1; i < num_; i++) {
if (in_tensors[i].get_dimensions().size() != dims.size()) {
CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same num of dims");
}
for (unsigned int j = 0; j < dims.size(); j++) {
if (in_tensors[i].get_dimensions()[j] != dims[j]) {
CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same dims");
}
}
}
for (size_t i = 0; i < num_; i++) {
in_tensors_.push_back(in_tensors[i]);
}
out_tensors_.push_back(out_tensor);
blobs_buff->reserve({num_}, &d_inputs_);
} catch (const std::runtime_error& rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
}
template <typename T>
void SubLayer<T>::initialize() {
std::shared_ptr<GeneralBuffer2<CudaHostAllocator>> pinned_host_buf =
GeneralBuffer2<CudaHostAllocator>::create();
pinned_host_buf->reserve({num_}, &h_inputs_);
pinned_host_buf->allocate();
for (size_t i = 0; i < num_; i++) {
h_inputs_.get_ptr()[i] = in_tensors_[i].get_ptr();
}
CK_CUDA_THROW_(hipMemcpyAsync((void*)d_inputs_.get_ptr(), (void*)h_inputs_.get_ptr(),
num_ * sizeof(T*), hipMemcpyHostToDevice,
get_gpu().get_stream()));
}
template <typename T>
void SubLayer<T>::fprop(bool is_train) {
CudaDeviceContext context(get_device_id());
T* output = out_tensors_[0].get_ptr();
dim3 block_size(256, 1, 1);
dim3 grid_size((size_ + block_size.x - 1) / block_size.x, 1, 1);
hipLaunchKernelGGL(( sub_kernel), dim3(grid_size), dim3(block_size), 0, get_gpu().get_stream(), d_inputs_.get_ptr(), output,
size_);
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template <typename T>
void SubLayer<T>::bprop() {
CudaDeviceContext context(get_device_id());
T* output = out_tensors_[0].get_ptr();
dim3 blockSize(256, 1, 1);
dim3 gridSize((size_ + blockSize.x - 1) / blockSize.x, 1, 1);
hipLaunchKernelGGL(( sub_dgrad_kernel), dim3(gridSize), dim3(blockSize), 0, get_gpu().get_stream(), output, d_inputs_.get_ptr(),
size_);
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template class SubLayer<float>;
} // namespace HugeCTR
| 2328ee5effb3242a9d87ba704e4bc726e74bdc07.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <layers/sub_layer.hpp>
#include <algorithm>
#include <functional>
#include <utils.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
#define BLOCK_DIM_SIZE 32
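// Elementwise forward pass: output[tid] = inputs[0][tid] - inputs[1][tid].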
template <typename T>
__global__ void sub_kernel(T** inputs, T* output, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) output[tid] = inputs[0][tid] - inputs[1][tid];
}
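// Backward pass: the gradient w.r.t. the first input is +top_grad and the
// gradient w.r.t. the second input is -top_grad.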
template <typename T>
__global__ void sub_dgrad_kernel(const T* top_grad, T** dgrads, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
dgrads[0][tid] = top_grad[tid];
dgrads[1][tid] = 0.0 - top_grad[tid];
}
}
} // end of namespace
template <typename T>
SubLayer<T>::SubLayer(const Tensors2<T>& in_tensors, const Tensor2<T>& out_tensor,
const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
const std::shared_ptr<GPUResource>& gpu_resource)
: Layer(gpu_resource) {
try {
size_ = in_tensors[0].get_num_elements();
num_ = in_tensors.size();
// error input checking
auto dims = in_tensors[0].get_dimensions();
if (num_ != 2) {
CK_THROW_(Error_t::WrongInput, "SubLayer needs 2 input tensors");
}
for (size_t i = 1; i < num_; i++) {
if (in_tensors[i].get_dimensions().size() != dims.size()) {
CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same num of dims");
}
for (unsigned int j = 0; j < dims.size(); j++) {
if (in_tensors[i].get_dimensions()[j] != dims[j]) {
CK_THROW_(Error_t::WrongInput, "All the input tensors must have the same dims");
}
}
}
for (size_t i = 0; i < num_; i++) {
in_tensors_.push_back(in_tensors[i]);
}
out_tensors_.push_back(out_tensor);
blobs_buff->reserve({num_}, &d_inputs_);
} catch (const std::runtime_error& rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
}
template <typename T>
void SubLayer<T>::initialize() {
std::shared_ptr<GeneralBuffer2<CudaHostAllocator>> pinned_host_buf =
GeneralBuffer2<CudaHostAllocator>::create();
pinned_host_buf->reserve({num_}, &h_inputs_);
pinned_host_buf->allocate();
for (size_t i = 0; i < num_; i++) {
h_inputs_.get_ptr()[i] = in_tensors_[i].get_ptr();
}
CK_CUDA_THROW_(cudaMemcpyAsync((void*)d_inputs_.get_ptr(), (void*)h_inputs_.get_ptr(),
num_ * sizeof(T*), cudaMemcpyHostToDevice,
get_gpu().get_stream()));
}
template <typename T>
void SubLayer<T>::fprop(bool is_train) {
CudaDeviceContext context(get_device_id());
T* output = out_tensors_[0].get_ptr();
dim3 block_size(256, 1, 1);
dim3 grid_size((size_ + block_size.x - 1) / block_size.x, 1, 1);
sub_kernel<<<grid_size, block_size, 0, get_gpu().get_stream()>>>(d_inputs_.get_ptr(), output,
size_);
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template <typename T>
void SubLayer<T>::bprop() {
CudaDeviceContext context(get_device_id());
T* output = out_tensors_[0].get_ptr();
dim3 blockSize(256, 1, 1);
dim3 gridSize((size_ + blockSize.x - 1) / blockSize.x, 1, 1);
sub_dgrad_kernel<<<gridSize, blockSize, 0, get_gpu().get_stream()>>>(output, d_inputs_.get_ptr(),
size_);
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template class SubLayer<float>;
} // namespace HugeCTR
|
1f651e8dbf45765eabbf5b5959d5c4a95e79cc4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file bilinear_sampler.cu
* \brief
* \author Xu Dong
*/
#include "./bilinear_sampler-inl.h"
#include <algorithm>
#include "../common/cuda_utils.h"
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn_bilinear_sampler-inl.h"
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
return (value >= lowerBound && value <= upperBound);
}
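// One thread per output element: maps the normalized grid coordinate in
// [-1, 1] to input pixel space, then bilinearly blends the four neighbouring
// pixels (out-of-bounds neighbours contribute zero).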
template<typename DType>
__global__ void BilinearSamplerForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_left_v = *(data + data_index);
if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_right_v = *(data + data_index + 1);
if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_left_v = *(data + data_index + i_w);
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_right_v = *(data + data_index + i_w + 1);
*(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
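// Backward pass: one thread per spatial location loops over channels,
// scattering the input gradient with atomicAdd and accumulating the grid
// gradient from the interpolation-weight derivatives.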
template<typename DType>
__global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h,
const int i_w, const DType* grad,
const DType* data, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* g_input,
const DType* grid_src,
DType* grad_grid) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (index_t c = 0; c < o_c; ++c) {
index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc 4 vertex value in input data
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
// calc input grad
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
atomicAdd(&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w);
top_left_v = *(data + data_index);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
atomicAdd(&g_input[data_index + 1], *(grad + grad_index) * top_left_y_w
* (1.0 - top_left_x_w));
top_right_v = *(data + data_index + 1);
}
if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
atomicAdd(&g_input[data_index+ i_w], *(grad + grad_index) * (1.0 - top_left_y_w)
* top_left_x_w);
bottom_left_v = *(data + data_index + i_w);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
atomicAdd(&g_input[data_index+ i_w + 1], *(grad + grad_index) * (1.0 - top_left_y_w)
* (1.0 - top_left_x_w));
bottom_right_v = *(data + data_index + i_w + 1);
}
// calc weight grad of top_left_w, then multiple -1 is the grad of grid_src
top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_x_w);
top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_y_w);
}
// calc grad of grid
*(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2;
*(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2;
}
}
} // namespace cuda
template<typename DType>
inline void BilinearSamplerForward(const Tensor<gpu, 4, DType> &output,
const Tensor<gpu, 4, DType> &input,
const Tensor<gpu, 4, DType> &grid_src) {
DType *out = output.dptr_;
const DType *data = input.dptr_;
const DType *grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler forward");
hipStream_t stream = Stream<gpu>::GetStream(output.stream_);
cuda::BilinearSamplerForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
// post kernel check
hipError_t err = hipPeekAtLastError();
CHECK_EQ(err, hipSuccess) << hipGetErrorString(err);
}
template<typename DType>
inline void BilinearSamplerBackward(const Tensor<gpu, 4, DType> &input_grad,
const Tensor<gpu, 4, DType> &ggrid,
const Tensor<gpu, 4, DType> &output_grad,
const Tensor<gpu, 4, DType> &input_data,
const Tensor<gpu, 4, DType> &grid) {
DType *g_input = input_grad.dptr_;
DType *grad_grid = ggrid.dptr_;
const DType *grid_src = grid.dptr_;
const DType *grad = output_grad.dptr_;
const DType *data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1),
o_h = output_grad.size(2), o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
/ kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler backward");
hipStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
cuda::BilinearSamplerBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid);
// post kernel check
hipError_t err = hipPeekAtLastError();
CHECK_EQ(err, hipSuccess) << hipGetErrorString(err);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(BilinearSamplerParam param, int dtype) {
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNBilinearSamplerOp<DType>(param);
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new BilinearSamplerOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
return op;
}
} // namespace op
} // namespace mxnet
| 1f651e8dbf45765eabbf5b5959d5c4a95e79cc4c.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file bilinear_sampler.cu
* \brief
* \author Xu Dong
*/
#include "./bilinear_sampler-inl.h"
#include <algorithm>
#include "../common/cuda_utils.h"
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn_bilinear_sampler-inl.h"
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
return (value >= lowerBound && value <= upperBound);
}
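// One thread per output element: maps the normalized grid coordinate in
// [-1, 1] to input pixel space, then bilinearly blends the four neighbouring
// pixels (out-of-bounds neighbours contribute zero).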
template<typename DType>
__global__ void BilinearSamplerForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_left_v = *(data + data_index);
if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_right_v = *(data + data_index + 1);
if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_left_v = *(data + data_index + i_w);
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_right_v = *(data + data_index + i_w + 1);
*(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
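// Backward pass: one thread per spatial location loops over channels,
// scattering the input gradient with atomicAdd and accumulating the grid
// gradient from the interpolation-weight derivatives.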
template<typename DType>
__global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h,
const int i_w, const DType* grad,
const DType* data, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* g_input,
const DType* grid_src,
DType* grad_grid) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (index_t c = 0; c < o_c; ++c) {
index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc 4 vertex value in input data
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
// calc input grad
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
atomicAdd(&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w);
top_left_v = *(data + data_index);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
atomicAdd(&g_input[data_index + 1], *(grad + grad_index) * top_left_y_w
* (1.0 - top_left_x_w));
top_right_v = *(data + data_index + 1);
}
if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
atomicAdd(&g_input[data_index+ i_w], *(grad + grad_index) * (1.0 - top_left_y_w)
* top_left_x_w);
bottom_left_v = *(data + data_index + i_w);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
atomicAdd(&g_input[data_index+ i_w + 1], *(grad + grad_index) * (1.0 - top_left_y_w)
* (1.0 - top_left_x_w));
bottom_right_v = *(data + data_index + i_w + 1);
}
// calc weight grad of top_left_w, then multiple -1 is the grad of grid_src
top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_x_w);
top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_y_w);
}
// calc grad of grid
*(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2;
*(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2;
}
}
} // namespace cuda
template<typename DType>
inline void BilinearSamplerForward(const Tensor<gpu, 4, DType> &output,
const Tensor<gpu, 4, DType> &input,
const Tensor<gpu, 4, DType> &grid_src) {
DType *out = output.dptr_;
const DType *data = input.dptr_;
const DType *grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler forward");
cudaStream_t stream = Stream<gpu>::GetStream(output.stream_);
cuda::BilinearSamplerForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
// post kernel check
cudaError err = cudaPeekAtLastError();
CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err);
}
template<typename DType>
inline void BilinearSamplerBackward(const Tensor<gpu, 4, DType> &input_grad,
const Tensor<gpu, 4, DType> &ggrid,
const Tensor<gpu, 4, DType> &output_grad,
const Tensor<gpu, 4, DType> &input_data,
const Tensor<gpu, 4, DType> &grid) {
DType *g_input = input_grad.dptr_;
DType *grad_grid = ggrid.dptr_;
const DType *grid_src = grid.dptr_;
const DType *grad = output_grad.dptr_;
const DType *data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1),
o_h = output_grad.size(2), o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
/ kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler backward");
cudaStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
cuda::BilinearSamplerBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid);
// post kernel check
cudaError err = cudaPeekAtLastError();
CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(BilinearSamplerParam param, int dtype) {
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNBilinearSamplerOp<DType>(param);
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new BilinearSamplerOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
return op;
}
} // namespace op
} // namespace mxnet
|
3cf3432a3ce5bd1ba381f6d176f44d152fdcbe8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"cuda_need.h"
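// Sums a winsize x winsize window (box sum) over a pitched 2D allocation:
// consecutive rows are `pitch` bytes apart, hence the (char*)base + y*pitch
// row addressing below.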
__global__ void smooth_pitch(float *data,float *out,size_t pitch,int dataSize,int winsize){
int x = blockIdx.y*blockDim.y + threadIdx.y;
int y = blockIdx.x*blockDim.x + threadIdx.x;
	int temp_x = 0, temp_y = 0;
	float sum = 0.0;
	int count = 0;
	float *row_a;
	if (x < dataSize && y < dataSize){
		for (int i = 0; i < winsize; i++){
			temp_y = y + i;
			if (temp_y < dataSize)
				row_a = (float*)((char*)data + temp_y * pitch);
			else
				break;
			for (int j = 0; j < winsize; j++){
				temp_x = x + j;
				if (temp_x < dataSize){
					sum += row_a[temp_x];
					count++;
				}
			}
		}
		// Store once after the whole window has been accumulated (the original
		// wrote the running sum back on every row iteration). `count` holds the
		// number of in-bounds taps; divide `sum` by it here if a mean rather
		// than a box sum is intended.
		row_a = (float*)((char*)out + y * pitch);
		row_a[x] = sum;
	}
}
void smooth2D_pre_data(char filepath[], int imgsize, float* memcpyHD_2D, float* memcpyDH_2D, float* kernel_2D, float* total_2D)
{
float *d_data;
float *d_out;
float timeDelay;
size_t pitch;
clock_t begintime, endtime,totalbegintime,totalendtime;
float *data = new float[imgsize*imgsize];
readData(filepath, data, imgsize);
totalbegintime = clock();
// printf("cuda_smooth2D begin.....\n");
hipMallocPitch((void**)&d_data, &pitch, imgsize*sizeof(float), imgsize);
hipMallocPitch((void**)&d_out, &pitch, imgsize*sizeof(float), imgsize);
begintime = clock();
hipMemcpy2D(d_data, pitch, data, imgsize*sizeof(float), imgsize*sizeof(float), imgsize, hipMemcpyHostToDevice);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyHD_2D = *memcpyHD_2D + timeDelay;
//printf("in 2D memcpyHostToDevice time is :%.3fms\n", timeDelay);
begintime = clock();
// the gpu used maximum number of threads of per block:1024
dim3 dimBlock(32, 32);
//max of grid 2147483647
dim3 dimGrid((imgsize + dimBlock.x - 1) / (dimBlock.x), (imgsize + dimBlock.y - 1) / (dimBlock.y));
smooth_pitch << <dimGrid, dimBlock >> >(d_data, d_out, pitch, imgsize, WINSIZE);
//smooth1D << <dimGrid, dimBlock >> >(d_data, d_out, DATASIZE, WINSIZE);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_2D = *kernel_2D + timeDelay;
//printf("in 2D kernel function time :%.3fms\n", timeDelay);
begintime = clock();
hipMemcpy2D(data, imgsize*sizeof(float), d_out, pitch, imgsize*sizeof(float), imgsize, hipMemcpyDeviceToHost);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyDH_2D = *memcpyDH_2D + timeDelay;
totalendtime = clock();
// printf("in 2D memcpyDeviceToHost time is :%.3fms\n", timeDelay);
timeDelay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_2D = *total_2D + timeDelay;
// printf("in 2D cuda_smooth2D total time is :%.3fms\n", timeDelay);
//printf("\n\n");
}
| 3cf3432a3ce5bd1ba381f6d176f44d152fdcbe8a.cu | #include"cuda_need.h"
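// Sums a winsize x winsize window (box sum) over a pitched 2D allocation:
// consecutive rows are `pitch` bytes apart, hence the (char*)base + y*pitch
// row addressing below.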
__global__ void smooth_pitch(float *data,float *out,size_t pitch,int dataSize,int winsize){
int x = blockIdx.y*blockDim.y + threadIdx.y;
int y = blockIdx.x*blockDim.x + threadIdx.x;
	int temp_x = 0, temp_y = 0;
	float sum = 0.0;
	int count = 0;
	float *row_a;
	if (x < dataSize && y < dataSize){
		for (int i = 0; i < winsize; i++){
			temp_y = y + i;
			if (temp_y < dataSize)
				row_a = (float*)((char*)data + temp_y * pitch);
			else
				break;
			for (int j = 0; j < winsize; j++){
				temp_x = x + j;
				if (temp_x < dataSize){
					sum += row_a[temp_x];
					count++;
				}
			}
		}
		// Store once after the whole window has been accumulated (the original
		// wrote the running sum back on every row iteration). `count` holds the
		// number of in-bounds taps; divide `sum` by it here if a mean rather
		// than a box sum is intended.
		row_a = (float*)((char*)out + y * pitch);
		row_a[x] = sum;
	}
}
void smooth2D_pre_data(char filepath[], int imgsize, float* memcpyHD_2D, float* memcpyDH_2D, float* kernel_2D, float* total_2D)
{
float *d_data;
float *d_out;
float timeDelay;
size_t pitch;
clock_t begintime, endtime,totalbegintime,totalendtime;
float *data = new float[imgsize*imgsize];
readData(filepath, data, imgsize);
totalbegintime = clock();
// printf("cuda_smooth2D begin.....\n");
cudaMallocPitch((void**)&d_data, &pitch, imgsize*sizeof(float), imgsize);
cudaMallocPitch((void**)&d_out, &pitch, imgsize*sizeof(float), imgsize);
begintime = clock();
cudaMemcpy2D(d_data, pitch, data, imgsize*sizeof(float), imgsize*sizeof(float), imgsize, cudaMemcpyHostToDevice);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyHD_2D = *memcpyHD_2D + timeDelay;
//printf("in 2D memcpyHostToDevice time is :%.3fms\n", timeDelay);
begintime = clock();
// the gpu used maximum number of threads of per block:1024
dim3 dimBlock(32, 32);
//max of grid 2147483647
dim3 dimGrid((imgsize + dimBlock.x - 1) / (dimBlock.x), (imgsize + dimBlock.y - 1) / (dimBlock.y));
smooth_pitch << <dimGrid, dimBlock >> >(d_data, d_out, pitch, imgsize, WINSIZE);
//smooth1D << <dimGrid, dimBlock >> >(d_data, d_out, DATASIZE, WINSIZE);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_2D = *kernel_2D + timeDelay;
//printf("in 2D kernel function time :%.3fms\n", timeDelay);
begintime = clock();
cudaMemcpy2D(data, imgsize*sizeof(float), d_out, pitch, imgsize*sizeof(float), imgsize, cudaMemcpyDeviceToHost);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyDH_2D = *memcpyDH_2D + timeDelay;
totalendtime = clock();
// printf("in 2D memcpyDeviceToHost time is :%.3fms\n", timeDelay);
timeDelay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_2D = *total_2D + timeDelay;
// printf("in 2D cuda_smooth2D total time is :%.3fms\n", timeDelay);
//printf("\n\n");
}
|
ffaf7bc827c6eaec3a2d58e66caf9945996ede6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
//#include <hip/hip_cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 32
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
//printf("gpu2 started\n");
float red_sum = 0;
int row = threadIdx.y; int col = threadIdx.x;
for(int i=0; i<num_ch; i++)
{
red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ;
}
d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum;
}
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//printf("gpu started\n");
__shared__ float s_w[R*S];
__shared__ float s_i[H*W];
int row = threadIdx.y; int col = threadIdx.x;
if(row*width+col<R*S)
{
s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)];
}
{
int s_i_idx = row*blockDim.x+col;
s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx];
if(s_i_idx+729 < H*W)
s_i[s_i_idx+729]= d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+729];
}
__syncthreads();
float prod = 0;
if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
{
for (int i=0; i<wt_width; i++){
float3 ip1 = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col)); float3 wt1 = *((float3*)(s_w+i*wt_width));
float3 ip2 = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col+3));float3 wt2 = *((float3*)(s_w+i*wt_width+3));
prod += ip1.x*wt1.x+ip1.y*wt1.y+ip1.z*wt1.z+ip2.x*wt2.x+ip2.y*wt2.y;
__syncthreads();
}
if(prod>=0)
d_o[0*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += prod;
if(row*width+col<R*S){
//re-zero the staged weights; no barrier here, since only R*S of the block's threads take this branch and __syncthreads() in divergent code is undefined behavior
s_w[(row*width+col)] = 0;
}
}
}
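// A sketch (hypothetical helper, not part of the original) of the vectorized dot product
// used in the loop above: a width-5 kernel row is read as two float3 loads (6 floats, the
// last lane ignored), so the caller must guarantee a[5] and b[5] are readable. float3
// only requires 4-byte alignment, so any float* offset is a legal load address.
__device__ __forceinline__ float dot5(const float *a, const float *b)
{
	float3 a1 = *(const float3*)a,       b1 = *(const float3*)b;
	float3 a2 = *(const float3*)(a + 3), b2 = *(const float3*)(b + 3);
	return a1.x*b1.x + a1.y*b1.y + a1.z*b1.z + a2.x*b2.x + a2.y*b2.y;
}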
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
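// Dimension check for the loops above (worked from this file's defines): with input
// H x W = 31 x 31, kernel R x S = 5 x 5 and stride U = 1, the valid-convolution output
// edge is E = (H - R)/U + 1 = (31 - 5)/1 + 1 = 27, matching #define E 27 and #define F 27.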
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = 0;
else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
// IP[c][d] = (a+b+c+d);
//IP[n*C*H*W+k*H*W+c*W+d] = (float)(c+d)/255;
}
}
}
}
if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_o,(long int)batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT,batch_size);
//printf("cpu done\n"); //CPU reference is disabled above, so skip its status message
//cpu_end = clock();
dim3 dimGrid(batch_size,256,96);
dim3 dimBlock(27,27,1);
//dim3 dimGridRed(batch_size,256,1);
//dim3 dimBlockRed(27,27,1);
//int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384;
//gpu_start = clock();
//hipFuncSetSharedMemConfig(ew_gpu_mmul,hipSharedMemBankSizeEightByte);
hipLaunchKernelGGL(ew_gpu_mmul, dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
hipDeviceSynchronize();
//red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,96,batch_size,256);
//gpu_end = clock();
//void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch };
//hipLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL);
//hipDeviceSynchronize();
hipMemcpy(OPG,d_o,batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u;
float max_error = 0;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
hipFree(d_o);
hipFree(d_i);
hipFree(d_w);
hipFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
| ffaf7bc827c6eaec3a2d58e66caf9945996ede6d.cu | #include <stdio.h>
#include <iostream>
//#include <cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 32
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
//printf("gpu2 started\n");
float red_sum = 0;
int row = threadIdx.y; int col = threadIdx.x;
for(int i=0; i<num_ch; i++)
{
red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ;
}
d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum;
}
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//printf("gpu started\n");
__shared__ float s_w[R*S];
__shared__ float s_i[H*W];
int row = threadIdx.y; int col = threadIdx.x;
if(row*width+col<R*S)
{
s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)];
}
{
int s_i_idx = row*blockDim.x+col;
s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx];
if(s_i_idx+729 < H*W)
s_i[s_i_idx+729]= d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+729];
}
__syncthreads();
float prod = 0;
if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
{
for (int i=0; i<wt_width; i++){
float3 ip1 = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col)); float3 wt1 = *((float3*)(s_w+i*wt_width));
float3 ip2 = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col+3));float3 wt2 = *((float3*)(s_w+i*wt_width+3));
prod += ip1.x*wt1.x+ip1.y*wt1.y+ip1.z*wt1.z+ip2.x*wt2.x+ip2.y*wt2.y;
__syncthreads();
}
if(prod>=0)
d_o[0*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += prod;
if(row*width+col<R*S){
//re-zero the staged weights; no barrier here, since only R*S of the block's threads take this branch and __syncthreads() in divergent code is undefined behavior
s_w[(row*width+col)] = 0;
}
}
}
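// Staging arithmetic for the shared-memory loads above (worked from the launch
// configuration in main): the input tile has H*W = 31*31 = 961 floats but the block has
// only 27*27 = 729 threads, so each thread loads element tid and, when tid+729 < 961,
// a second element; the two passes cover all 961 entries with no overlap.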
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = 0;
else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
// IP[c][d] = (a+b+c+d);
//IP[n*C*H*W+k*H*W+c*W+d] = (float)(c+d)/255;
}
}
}
}
if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_o,(long int)batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT,batch_size);
//printf("cpu done\n"); //CPU reference is disabled above, so skip its status message
//cpu_end = clock();
dim3 dimGrid(batch_size,256,96);
dim3 dimBlock(27,27,1);
//dim3 dimGridRed(batch_size,256,1);
//dim3 dimBlockRed(27,27,1);
//int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384;
//gpu_start = clock();
//cudaFuncSetSharedMemConfig(ew_gpu_mmul,cudaSharedMemBankSizeEightByte);
ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
cudaDeviceSynchronize();
//red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,96,batch_size,256);
//gpu_end = clock();
//void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch };
//cudaLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL);
//cudaDeviceSynchronize();
cudaMemcpy(OPG,d_o,batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u;
float max_error = 0;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
cudaFree(d_o);
cudaFree(d_i);
cudaFree(d_w);
cudaFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
c1c1f06dbc7340007a84d51c18ee64ecb49b908f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unistd.h>
#include <istream>
#include <iostream>
#include <sstream>
//Only include parameters file if we're not creating the shared library
#ifndef PYTHON
#include "params.h"
#endif
#include "supsmu.h"
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <numeric>
#include <vector>
#include <fstream>
#include <omp.h>
#include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include "structs.h"
#include "main.h"
#include "kernel.h"
using namespace std;
template <typename T>
std::vector<int> sort_indexes(const std::vector<T> &v) {
// initialize original index locations
std::vector<int> idx(v.size());
iota(idx.begin(), idx.end(), 0);
// sort indexes based on comparing values in v
// using std::stable_sort instead of std::sort
// to avoid unnecessary index re-orderings
// when v contains elements of equal values
stable_sort(idx.begin(), idx.end(),
[&v](int i1, int i2) {return v[i1] < v[i2];});
return idx;
}
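// A minimal usage sketch (hypothetical values, not called anywhere): sort_indexes
// returns the argsort permutation and leaves the input vector untouched.
static void sort_indexes_usage_sketch()
{
	std::vector<double> v = {0.3, 0.1, 0.2};
	std::vector<int> idx = sort_indexes(v); // idx == {1, 2, 0}: v[1] <= v[2] <= v[0]
	(void)idx;
}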
//original port from Nat's code before breaking into two separate functions
void supsmu_periodogram(int n, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram)
{
DTYPE deltaf=(maxFreq-minFreq)/numFreq;
//runs supersmoother for folded lightcurves on a frequency grid
//compute minimum time
DTYPE minTime=time[0];
for (int i=0; i<n; i++)
{
if (time[i]<minTime)
{
minTime=time[i];
}
}
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*n);
for (int i=0; i<n; i++)
{
tt[i]=time[i]-minTime;
}
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*n);
for (int i=0; i<n; i++)
{
weights[i]=1.0/(error[i]*error[i]);
}
DTYPE w0=0.0;
for (int i=0; i<n; i++)
{
w0+=weights[i];
}
w0=w0/(n*1.0);
DTYPE * y = (DTYPE *)malloc(sizeof(DTYPE)*n);
std::copy(data, data+n, y);
DTYPE tmp=0;
for (int i=0; i<n; i++)
{
tmp+=(data[i]*weights[i]);
}
tmp=tmp/(n*1.0);
DTYPE y0=tmp/w0;
for (int i=0; i<n; i++)
{
y[i]=y[i]-y0;
}
//
tmp=0;
for (int i=0; i<n; i++)
{
tmp+=(y[i]*y[i])*weights[i];
}
DTYPE chi0=tmp/(n*1.0);
DTYPE * chi2=(DTYPE *)malloc(sizeof(DTYPE)*numFreq);
//Arrays that need to be allocated for each thread
DTYPE * sc=(DTYPE *)malloc(sizeof(DTYPE)*n*8*NTHREADSCPU);
DTYPE * smo=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
DTYPE * t1=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
int * argkeys=(int *)malloc(sizeof(int)*n*NTHREADSCPU);
DTYPE * t1_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
DTYPE * data_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
DTYPE * weights_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
double tstart=omp_get_wtime();
#pragma omp parallel for num_threads(NTHREADSCPU)
for (int i=0; i<numFreq; i++)
{
int tid=omp_get_thread_num();
//Offsets into arrays for each thread
unsigned int offset_n=tid*n;
unsigned int offset_sc=tid*n*8;
DTYPE p=1.0/(minFreq+(deltaf*i));
for (int j=0; j<n; j++)
{
t1[offset_n+j]=fmod(tt[j],p)/p;
}
//Do argsort on t1
// sortKeyValuePairsIntDouble(argkeys+offset_n, t1+offset_n, n);
sortKeyValuePairsIntFloatDouble(argkeys+offset_n, t1+offset_n, n);
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1+offset_n, t1_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(data, data_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(weights, weights_sortby_argkeys+offset_n, argkeys+offset_n, n);
chi2[i]=supsmu_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, sc+offset_sc, alpha);
}
double tend=omp_get_wtime();
printf("\nTime main loop: %f", tend - tstart);
for (int i=0; i<numFreq; i++)
{
pgram[i]=(0.5*(chi0-chi2[i])*n)/chi0;
}
}
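// Normalization used above, written out: P(f) = 0.5 * n * (chi0 - chi2(f)) / chi0, where
// chi0 is the weighted variance of the data about its weighted mean and chi2(f) is the
// weighted residual variance left after supersmoothing the light curve folded at period
// 1/f. P(f) therefore grows as the fold at f explains more of the variance, and stays
// near zero when the smooth explains nothing.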
//overloaded function for float/doubles
void mapArr(double * inArr, double * outArr, int * keys, int n)
{
for (int i=0; i<n; i++)
{
outArr[i]=inArr[keys[i]];
}
}
//overloaded function for float/doubles
void mapArr(float * inArr, float * outArr, int * keys, int n)
{
for (int i=0; i<n; i++)
{
outArr[i]=inArr[keys[i]];
}
}
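// Sketch of the gather that mapArr implements, out[i] = in[keys[i]] (hypothetical
// values, not called anywhere): pushing several parallel arrays through the permutation
// from sort_indexes co-sorts them all, which is how t1, data, and weights stay aligned.
static void mapArr_usage_sketch()
{
	double in[3] = {10.0, 20.0, 30.0}, out[3];
	int keys[3] = {1, 2, 0};
	mapArr(in, out, keys, 3); // out == {20.0, 30.0, 10.0}
}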
//overloaded function for float/doubles
void sortKeyValuePairsIntFloatDouble(int * keys, double * values, int n)
{
std::vector<double>val_vect(values, values+n);
std::vector<int>keys_vect = sort_indexes(val_vect);
std::copy(keys_vect.begin(), keys_vect.end(), keys);
}
//overloaded function for float/doubles
void sortKeyValuePairsIntFloatDouble(int * keys, float * values, int n)
{
std::vector<float>val_vect(values, values+n);
std::vector<int>keys_vect = sort_indexes(val_vect);
std::copy(keys_vect.begin(), keys_vect.end(), keys);
}
//overloaded function for float/doubles
// void sortKeyValuePairsIntDouble(int * keys, double * values, int n)
// {
// std::vector<double>val_vect(values, values+n);
// std::vector<int>keys_vect = sort_indexes(val_vect);
// std::copy(keys_vect.begin(), keys_vect.end(), keys);
// }
//overloaded function for float/doubles
// void sortKeyValuePairsIntFloat(int * keys, float * values, int n)
// {
// std::vector<float>val_vect(values, values+n);
// std::vector<int>keys_vect = sort_indexes(val_vect);
// std::copy(keys_vect.begin(), keys_vect.end(), keys);
// }
DTYPE supsmu_chi2(int n, DTYPE * time, DTYPE * data, DTYPE * weights , DTYPE * smo, DTYPE * sc, DTYPE alpha)
{
//NAT: is iper==1? [yes- means periodic]
int iper=1;
//NAT: is span==0.0? [yes- lets supersmoother work its magic (otherwise uses input span)]
DTYPE span=0.0;
supsmu(n, time, data, weights, iper, span, alpha, smo, sc);
DTYPE tmptotal=0;
for (int i=0; i<n; i++){
tmptotal+=((data[i]-smo[i])*(data[i]-smo[i]))*weights[i];
}
return tmptotal/(n*1.0);
}
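// The statistic above, written out: with weights w_i = 1/sigma_i^2,
//   chi2 = (1/n) * sum_i w_i * (y_i - smo_i)^2
// i.e. the weighted mean squared residual of the data about the supersmoother fit.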
DTYPE supsmu_singlepass_chi2(int n, DTYPE * time, DTYPE * data, DTYPE * weights , DTYPE * smo, DTYPE alpha)
{
//NAT: is iper==1? [yes- means periodic]
int iper=1;
//NAT: is span==0.0? [yes- lets supersmoother work its magic (otherwise uses input span)]
DTYPE span=0.0;
supsmusinglepass(n, time, data, weights, iper, span, alpha, smo);
DTYPE tmptotal=0;
for (int i=0; i<n; i++){
tmptotal+=((data[i]-smo[i])*(data[i]-smo[i]))*weights[i];
}
return tmptotal/(n*1.0);
}
//Copied/pasted comments from original fortran code by Friedman
// input:
// n : number of observations (x,y - pairs).
// x(n) : ordered abscissa values.
// y(n) : corresponding ordinate (response) values.
// w(n) : weight for each (x,y) observation.
// iper : periodic variable flag.
// iper=1 => x is ordered interval variable.
// iper=2 => x is a periodic variable with values
// in the range (0.0,1.0) and period 1.0.
// span : smoother span (fraction of observations in window).
// span=0.0 => automatic (variable) span selection.
// alpha : controls high frequency (small span) penalty
// used with automatic span selection (bass tone control).
// (alpha.le.0.0 or alpha.gt.10.0 => no effect.)
// output:
// smo(n) : smoothed ordinate (response) values.
// scratch:
// sc(n,7) : internal working storage.
int supsmu (int n, DTYPE * x, DTYPE * y, DTYPE * w, int iper, DTYPE span, DTYPE alpha, DTYPE * smo, DTYPE * sc) {
// sc is scratch space (8,n)
// output is smo: smoothed version of y
int i,j,jper;
DTYPE vsmlsq,sw,sy,a,scale,resmin,tmp,f;
// spans to be estimated: tweeter, midrange, and woofer
DTYPE spans[] = {0.05,0.2,0.5};
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo[j] = a;
return 0;
}
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
//Nat: can be removed
// while (scale<=0) {
// if (j<n-1) j+=1;
// if (i>0) i-=1;
// scale=x[j]-x[i];
// }
vsmlsq=1.e-6*scale*scale;
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smooth (n,x,y,w,span,jper,vsmlsq,smo,sc); // fixed span
return 0;
}
// if we made it here, the span will be estimated and variable
for (i=0;i<3;i++) {
smooth (n,x,y,w,spans[i],jper,vsmlsq,sc+2*i*n,sc+6*n);
smooth (n,x,sc+6*n,w,spans[1],-jper,vsmlsq,sc+(2*i+1)*n,sc+7*n);
}
for (j=0;j<n;j++) {
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc[j+(2*i+1)*n]<resmin) {
resmin=sc[j+(2*i+1)*n];
sc[j+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc[j+5*n] && resmin>0) {
tmp = resmin/sc[j+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc[j+6*n]+=(spans[2]-sc[j+6*n])*pow(tmp,10.0-alpha);
}
}
smooth (n,x,sc+6*n,w,spans[1],-jper,vsmlsq,sc+n,sc+7*n);
for (j=0;j<n;j++) {
if (sc[j+n]<=spans[0]) sc[j+n]=spans[0];
if (sc[j+n]>=spans[2]) sc[j+n]=spans[2];
f=sc[j+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc[j+3*n]=(1.0-f)*sc[j+2*n]+f*sc[j];
} else {
f/=spans[2]-spans[1];
sc[j+3*n]=(1.0-f)*sc[j+2*n]+f*sc[j+4*n];
}
}
smooth (n,x,sc+3*n,w,spans[0],-jper,vsmlsq,smo,sc+7*n);
return 0;
}
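// A minimal calling sketch (hypothetical values, not called anywhere): x must be sorted
// ascending and, when iper=2, rescaled into [0.0,1.0). span=0.0 requests automatic span
// selection. The periodogram drivers in this file allocate 8 scratch columns for sc,
// which is safe for every access pattern in supsmu/smooth.
static void supsmu_call_sketch(int n, DTYPE *x, DTYPE *y, DTYPE *w, DTYPE *smo)
{
	DTYPE *sc = (DTYPE *)malloc(sizeof(DTYPE) * n * 8);
	supsmu(n, x, y, w, /*iper=*/2, /*span=*/0.0, /*alpha=*/9.0, smo, sc);
	free(sc);
}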
int smooth (int n, DTYPE * x, DTYPE * y, DTYPE * w, DTYPE span, int iper, DTYPE vsmlsq, DTYPE * smo, DTYPE * acvr) {
int i,j,jper,in,out,ibw,it; //j0,
DTYPE xto,xti;
DTYPE wt,fbo,fbw=0.,xm=0.,ym=0.,tmp,var=0.,cvar=0.,a,h; //,sy
jper=abs(iper);
ibw=0.5*span*n+0.5;
if (ibw<2) ibw=2;
it=2*ibw+1;
for (i=0;i<it;i++) {
j=i;
if (jper==2) j=i-ibw-1;
if (j<0) {
j+=n;
xti=x[j]-1.0;
} else xti=x[j];
wt=w[j];
fbo=fbw;
fbw+=wt;
if (fbw>0) {
xm=(fbo*xm+wt*xti)/fbw;
ym=(fbo*ym+wt*y[j])/fbw;
}
if (fbo>0) {
tmp=fbw*wt*(xti-xm)/fbo;
var+=tmp*(xti-xm);
cvar+=tmp*(y[j]-ym);
}
}
for (j=0;j<n;j++) {
out=j-ibw-1;
in=j+ibw;
if (jper==2 || (out>=0 && in<n)) {
if (in>n-1) {
in-=n;
xti=x[in]+1.0;
} else xti=x[in];
if (out<0) {
out+=n;
xto=x[out]-1.0;
} else xto=x[out];
wt=w[out];
fbo=fbw;
fbw-=wt;
if (fbw>0) {
tmp=fbo*wt*(xto-xm)/fbw;
var-=tmp*(xto-xm);
cvar-=tmp*(y[out]-ym);
}
if (fbw>0) {
xm=(fbo*xm-wt*xto)/fbw;
ym=(fbo*ym-wt*y[out])/fbw;
}
wt=w[in];
fbo=fbw;
fbw+=wt;
if (fbw>0) {
xm=(fbo*xm+wt*xti)/fbw;
ym=(fbo*ym+wt*y[in])/fbw;
}
if (fbo>0) {
tmp=fbw*wt*(xti-xm)/fbo;
var+=tmp*(xti-xm);
cvar+=tmp*(y[in]-ym);
}
}
a=0.0;
if (var>vsmlsq) a=cvar/var;
smo[j]=a*(x[j]-xm)+ym;
if (iper>0) {
h=0.0;
if (fbw>0) h=1.0/fbw;
if (var>vsmlsq) h+=(x[j]-xm)*(x[j]-xm)/var;
acvr[j]=0.0;
a=1.0-w[j]*h;
if (a>0) acvr[j]=fabs(y[j]-smo[j])/a;
else if (j>0) acvr[j]=acvr[j-1];
}
}
//Nat: can be removed
// for (j=0;j<n;j++) {
// sy=smo[j]*w[j];
// fbw=w[j];
// j0=j;
// while (j<n-1 && x[j+1]<=x[j]) {
// j+=1;
// sy+=w[j]*smo[j];
// fbw+=w[j];
// }
// if (j>j0) {
// a=0.0;
// if (fbw>0) a=sy/fbw;
// for (i=j0;i<=j;i++) smo[i]=a;
// }
// }
return 0;
}
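// The moving-window statistics above are a weighted incremental (West-style) update.
// Adding point (x,y,w) to a window whose weight sum goes from fbo to fbw = fbo + w:
//   xm'  = (fbo*xm + w*x) / fbw
//   var  += (fbw*w/fbo) * (x - xm')^2
//   cvar += (fbw*w/fbo) * (x - xm') * (y - ym')
// and the local linear fit evaluated at x[j] is smo[j] = (cvar/var)*(x[j] - xm') + ym'.
// Removing a point mirrors the update with signs flipped, so each window slide is O(1).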
int supsmusinglepass(int n, DTYPE * x, DTYPE * y, DTYPE * w, int iper, DTYPE span, DTYPE alpha, DTYPE * smo)
{
int ibw[3];
DTYPE vsmlsq,scale;
DTYPE spans[] = {0.05,0.2,0.5};
int i=n/4-1;
int j=3*(i+1)-1;
scale=x[j]-x[i];
vsmlsq=1.e-6*scale*scale;
for (i=0;i<3;i++) {
ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[i]<2) ibw[i]=2;
}
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
smoothsinglepass(n, ibw, x, y, w, vsmlsq, alpha, smo);
return 0;
}
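// Worked example of the span -> half-window conversion above: for n = 200 observations
// the tweeter span 0.05 gives ibw = (int)(0.5*0.05*200 + 0.5) = 5, i.e. a window of
// 2*ibw+1 = 11 points, while the woofer span 0.5 gives ibw = 50 (a 101-point window).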
void smoothsinglepass(int n, int *ibw, DTYPE *x, DTYPE *y, DTYPE *w, DTYPE vsmlsq, int alpha, DTYPE *smo) //note: alpha arrives as an int, so a fractional alpha from supsmusinglepass is truncated here
{
int i,j,in,out;
DTYPE wt,xto,xti,yto,yti,ibwb,smo0[3],a,f,chi2,chi2m;
DTYPE fbo,fbw[3],xm[3],ym[3],tmp,var[3]={0,0,0},vary=0.,cvar[3]={0,0,0};
for (i=0;i<3;i++) {
j=n-ibw[i]-1;
xm[i]=x[j]-1.0;
ym[i]=y[j];
fbw[i]=w[j];
for (j=n-ibw[i];j<n;j++) {
xti=x[j]-1.0;
yti=y[j];
wt=w[j];
fbo=fbw[i];
fbw[i]+=wt;
xm[i]=(fbo*xm[i]+wt*xti)/fbw[i];
ym[i]=(fbo*ym[i]+wt*yti)/fbw[i];
tmp=fbw[i]*wt*(xti-xm[i])/fbo;
var[i]+=tmp*(xti-xm[i]);
cvar[i]+=tmp*(yti-ym[i]);
if (i==0) vary+=fbw[0]*wt*(yti-ym[0])*(yti-ym[0])/fbo;
}
for (j=0;j<ibw[i];j++) {
xti=x[j];
yti=y[j];
wt=w[j];
fbo=fbw[i];
fbw[i]+=wt;
xm[i]=(fbo*xm[i]+wt*xti)/fbw[i];
ym[i]=(fbo*ym[i]+wt*yti)/fbw[i];
tmp=fbw[i]*wt*(xti-xm[i])/fbo;
var[i]+=tmp*(xti-xm[i]);
cvar[i]+=tmp*(yti-ym[i]);
if (i==0) vary+=fbw[0]*wt*(yti-ym[0])*(yti-ym[0])/fbo;
}
}
for (j=0;j<n;j++) {
for (i=0;i<3;i++) {
out=j-ibw[i]-1;
in=j+ibw[i];
if (in>n-1) {
in-=n;
xti=x[in]+1.0;
} else xti=x[in];
if (out<0) {
out+=n;
xto=x[out]-1.0;
} else xto=x[out];
yti=y[in];
yto=y[out];
wt=w[out];
fbo=fbw[i];
fbw[i]-=wt;
tmp=fbo*wt*(xto-xm[i])/fbw[i];
var[i]-=tmp*(xto-xm[i]);
cvar[i]-=tmp*(yto-ym[i]);
if (i==0) vary-=fbo*wt*(yto-ym[0])*(yto-ym[0])/fbw[0];
xm[i]=(fbo*xm[i]-wt*xto)/fbw[i];
ym[i]=(fbo*ym[i]-wt*yto)/fbw[i];
wt=w[in];
fbo=fbw[i];
fbw[i]+=wt;
xm[i]=(fbo*xm[i]+wt*xti)/fbw[i];
ym[i]=(fbo*ym[i]+wt*yti)/fbw[i];
tmp=fbw[i]*wt*(xti-xm[i])/fbo;
var[i]+=tmp*(xti-xm[i]);
cvar[i]+=tmp*(yti-ym[i]);
if (i==0) vary+=fbw[0]*wt*(yti-ym[0])*(yti-ym[0])/fbo;
}
chi2m=1.e20; ibwb=ibw[2];
for (i=0;i<3;i++) {
a=0.0;
if (var[i]>vsmlsq) a=cvar[i]/var[i];
smo0[i]=a*(x[j]-xm[i])+ym[i];
chi2 = vary-2*a*cvar[0]+a*a*var[0];
if (i>0) {
tmp = ym[i]-ym[0]-a*(xm[i]-xm[0]);
chi2 += tmp*tmp*fbw[0];
}
tmp=1.0/fbw[i];
if (var[i]>vsmlsq) tmp+=(x[j]-xm[i])*(x[j]-xm[i])/var[i];
tmp = 1.0 - w[j]*tmp;
chi2 = fabs(chi2)/(tmp*tmp);
if (chi2<chi2m) {
chi2m=chi2;
ibwb=(ibw[1]+ibw[i])/2.;
}
}
tmp = sqrt(chi2m/chi2);
if (tmp<1.e-7) tmp=1.e-7;
ibwb+=(ibw[2]-ibwb)*pow(tmp,10.-alpha);
f = ibwb-ibw[1];
if (f<0) {
f/=ibw[0]-ibw[1];
smo[j]=(1.0-f)*smo0[1]+f*smo0[0];
} else {
f/=ibw[2]-ibw[1];
smo[j]=(1.0-f)*smo0[1]+f*smo0[2];
}
}
}
void supsmu_periodogram_innerloopcpu(int iteration, int n, DTYPE freqToTest, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram,
DTYPE * tt, DTYPE * weights, DTYPE * chi2, DTYPE * sc, DTYPE * smo, DTYPE * t1, int * argkeys, DTYPE * t1_sortby_argkeys,
DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int tid=omp_get_thread_num();
//Offsets into arrays for each thread
unsigned int offset_n=tid*n;
unsigned int offset_sc=tid*n*8;
DTYPE p=1.0/freqToTest;
for (int j=0; j<n; j++)
{
t1[offset_n+j]=fmod(tt[j],p)/p;
}
sortKeyValuePairsIntFloatDouble(argkeys+offset_n, t1+offset_n, n);
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1+offset_n, t1_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(data, data_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(weights, weights_sortby_argkeys+offset_n, argkeys+offset_n, n);
chi2[iteration]=supsmu_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, sc+offset_sc, alpha);
}
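// The folding step above maps each timestamp to a phase in [0,1): phase = fmod(t,p)/p
// for trial period p. Worked example with hypothetical numbers: t = 10.3 and p = 2.0
// give fmod(10.3, 2.0)/2.0 = 0.3/2.0 = 0.15, so points one period apart land on the
// same phase, and a true period collapses the folded curve onto a single smooth track.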
void supsmu_singlepass_periodogram_innerloopcpu(int iteration, int n, DTYPE freqToTest, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram,
DTYPE * tt, DTYPE * weights, DTYPE * chi2, DTYPE * smo, DTYPE * t1, int * argkeys, DTYPE * t1_sortby_argkeys,
DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int tid=omp_get_thread_num();
//Offsets into arrays for each thread
unsigned int offset_n=tid*n;
DTYPE p=1.0/freqToTest;
for (int j=0; j<n; j++)
{
t1[offset_n+j]=fmod(tt[j],p)/p;
}
sortKeyValuePairsIntFloatDouble(argkeys+offset_n, t1+offset_n, n);
// printf("\nCPU argkeys: ");
// for (int j=0; j<n; j++)
// {
// printf("\n%d",argkeys[offset_n+j]);
// }
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1+offset_n, t1_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(data, data_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(weights, weights_sortby_argkeys+offset_n, argkeys+offset_n, n);
// printf("\nCPU t1: ");
// for (int j=0; j<n; j++)
// {
// printf("\n%f",t1[offset_n+j]);
// }
// printf("\nCPU argkeys: ");
// for (int j=0; j<n; j++)
// {
// printf("\n%d",argkeys[offset_n+j]);
// }
// printf("\nCPU t1_sortby_argkeys: ");
// for (int j=0; j<n; j++)
// {
// printf("\n%f",t1_sortby_argkeys[offset_n+j]);
// }
// printf("\n****************");
chi2[iteration]=supsmu_singlepass_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, alpha);
}
//single object processing
//MODEFLAG- 0 default supersmoother with multiple passes
//MODEFLAG- 1 Nat's singlepass supersmoother
void supersmoothercpusingleobject(bool MODEFLAG, DTYPE * time, DTYPE * data, DTYPE * error, const unsigned int sizeData,
const unsigned int numFreqs, const DTYPE minFreq, const DTYPE maxFreq, const DTYPE freqStep, DTYPE alpha,
DTYPE * pgram, DTYPE * foundPeriod, DTYPE * chi2, DTYPE * sc, DTYPE * smo, DTYPE * t1, int * argkeys,
DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys, DTYPE * weights, DTYPE * tt)
{
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
//Default supersmoother
if(MODEFLAG==0)
{
//Single object -- parallelize over frequencies
#pragma omp parallel for num_threads(NTHREADSCPU) schedule(static)
for (unsigned int i=0; i<numFreqs; i++)
{
DTYPE freqToTest=minFreq+(freqStep*i);
supsmu_periodogram_innerloopcpu(i, sizeData, freqToTest, time, data, error, alpha, pgram, tt, weights, chi2, sc, smo, t1, argkeys, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys);
}
}
//Nat's single pass supersmoother
else if(MODEFLAG==1)
{
printf("\nRunning single pass");
//Single object -- parallelize over frequencies
#pragma omp parallel for num_threads(NTHREADSCPU) schedule(static)
for (unsigned int i=0; i<numFreqs; i++)
{
DTYPE freqToTest=minFreq+(freqStep*i);
supsmu_singlepass_periodogram_innerloopcpu(i, sizeData, freqToTest, time, data, error, alpha, pgram, tt, weights, chi2, smo, t1, argkeys, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys);
}
}
for (unsigned int i=0; i<numFreqs; i++)
{
pgram[i]=(0.5*(chi0-chi2[i])*sizeData)/chi0;
}
computePeriodSuperSmoother(pgram, numFreqs, minFreq, maxFreq, foundPeriod);
}
void computePeriodSuperSmoother(DTYPE * pgram, const unsigned int numFreqs, const DTYPE minFreq, const DTYPE maxFreq, DTYPE * foundPeriod)
{
DTYPE deltaf=(maxFreq-minFreq)/(numFreqs*1.0);
int maxPowerIdx=0;
DTYPE maxPower=pgram[0];
for (unsigned int i=0; i<numFreqs; i++)
{
if (pgram[i]>maxPower)
{
maxPower=pgram[i];
maxPowerIdx=i;
}
}
printf("\nFreq: %f, maxpowerIdx: %d",(minFreq+(maxPowerIdx*deltaf)), maxPowerIdx);
*foundPeriod=1.0/(minFreq+(maxPowerIdx*deltaf));
}
//MODEFLAG-0 original supsmu (multi-pass)
//MODEFLAG-1 Nat's Single-pass supsmu
void supersmootherCPUBatch(bool MODEFLAG, unsigned int * objectId, DTYPE * time, DTYPE * data, DTYPE * error, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq,
const unsigned int numFreqs, DTYPE * sumPeriods, DTYPE ** pgram, DTYPE * foundPeriod, DTYPE alpha,
DTYPE * chi2, DTYPE * sc, DTYPE * smo, DTYPE * t1, int * argkeys,
DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys, DTYPE * weights, DTYPE * tt)
{
//compute the object ranges in the arrays and store in struct
//This is given by the objectId
struct lookupObj * objectLookup=NULL;
unsigned int numUniqueObjects;
computeObjectRanges(objectId, &sizeData, &objectLookup, &numUniqueObjects);
#ifndef PYTHON
*pgram=(DTYPE *)malloc(sizeof(DTYPE)*(numFreqs)*numUniqueObjects);
#endif
foundPeriod=(DTYPE *)malloc(sizeof(DTYPE)*numUniqueObjects);
const DTYPE freqStep=(maxFreq-minFreq)/(numFreqs*1.0);
//number of objects skipped because they didn't have enough observations
unsigned int countSkippedObjectsThresh=0;
//for each object, call the parallel cpu algorithm
// for (unsigned int i=0; i<numUniqueObjects; i++)
for (unsigned int i=0; i<numUniqueObjects; i++)
{
unsigned int idxMin=objectLookup[i].idxMin;
unsigned int idxMax=objectLookup[i].idxMax;
unsigned int sizeDataForObject=idxMax-idxMin+1;
uint64_t pgramOffset=(uint64_t)i*(uint64_t)numFreqs;
//make sure the object has at least OBJTHRESH observations
if (sizeDataForObject>=OBSTHRESH)
{
supersmoothercpusingleobject(MODEFLAG, &time[idxMin], &data[idxMin], &error[idxMin], sizeDataForObject,
numFreqs, minFreq, maxFreq, freqStep, alpha, *pgram+pgramOffset, foundPeriod+i, chi2, sc,
smo, t1, argkeys, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys, weights, tt);
}
//too few data points to compute the periods
else
{
countSkippedObjectsThresh++;
foundPeriod[i]=0.0;
}
printf("\nObject: %d, Period: %f",objectLookup[i].objId, foundPeriod[i]);
}
printf("\nNumber of objects skipped because they didn't have %d observations: %u", OBSTHRESH, countSkippedObjectsThresh);
#if PRINTPERIODS==1
for (unsigned int i=0; i<numUniqueObjects; i++)
{
printf("\nObject: %d, Period: %f",objectLookup[i].objId, foundPeriod[i]);
}
#endif
//Validation
for (unsigned int i=0; i<numUniqueObjects; i++)
{
(*sumPeriods)+=foundPeriod[i];
}
}
//original port from Nat's code
//First step: making one big function except the kernel call
//will call on the CPU first for testing
void supsmu_periodogram_GPU_Batch_prototype(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod)
{
struct lookupObj * objectLookup=NULL;
unsigned int numUniqueObjects;
computeObjectRanges(objectId, &sizeData, &objectLookup, &numUniqueObjects);
pgram=(DTYPE *)malloc(sizeof(DTYPE)*(numFreq)*numUniqueObjects);
foundPeriod=(DTYPE *)malloc(sizeof(DTYPE)*numUniqueObjects);
//Allocate once
DTYPE * chi2=(DTYPE *)malloc(sizeof(DTYPE)*numFreq*numUniqueObjects);
DTYPE * y = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
//compute minimum time
DTYPE minTime=time[0];
for (unsigned int i=0; i<sizeData; i++)
{
if (time[i]<minTime)
{
minTime=time[i];
}
}
DTYPE w0=0.0;
DTYPE tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
tt[i]=time[i]-minTime;
weights[i]=1.0/(error[i]*error[i]);
w0+=weights[i];
tmp+=(data[i]*weights[i]);
}
w0=w0/(sizeData*1.0);
tmp=tmp/(sizeData*1.0);
DTYPE y0=tmp/w0;
std::copy(data, data+sizeData, y);
// DTYPE tmp=0;
// for (int i=0; i<sizeData; i++)
// {
// tmp+=(data[i]*weights[i]);
// }
// tmp=tmp/(sizeData*1.0);
// DTYPE y0=tmp/w0;
tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
y[i]=y[i]-y0;
tmp+=(y[i]*y[i])*weights[i];
}
//
// tmp=0;
// for (int i=0; i<sizeData; i++)
// {
// tmp+=(y[i]*y[i])*weights[i];
// }
DTYPE chi0=tmp/(sizeData*1.0);
//Arrays that need to be allocated for each thread
DTYPE * sc=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*7*NTHREADSCPU);
DTYPE * smo=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
DTYPE * t1=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
int * argkeys=(int *)malloc(sizeof(int)*sizeData*NTHREADSCPU);
DTYPE * t1_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
DTYPE * data_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
DTYPE * weights_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
double tstart=omp_get_wtime();
for (int i=0; i<numFreq; i++)
{
// int tid=omp_get_thread_num();
// //Offsets into arrays for each thread
// unsigned int offset_n=tid*n;
// unsigned int offset_sc=tid*n*8;
DTYPE p=1.0/(minFreq+(deltaf*i));
for (unsigned int j=0; j<sizeData; j++)
{
t1[j]=fmod(tt[j],p)/p;
}
//Do argsort on t1
// sortKeyValuePairsIntDouble(argkeys, t1, sizeData);
sortKeyValuePairsIntFloatDouble(argkeys, t1, sizeData);
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1, t1_sortby_argkeys, argkeys, sizeData);
mapArr(data, data_sortby_argkeys, argkeys, sizeData);
mapArr(weights, weights_sortby_argkeys, argkeys, sizeData);
// chi2[i]=supsmu_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, sc+offset_sc, alpha);
//chi2
int iper=1;
DTYPE span=0.0;
supsmu(sizeData, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys, iper, span, alpha, smo, sc);
DTYPE tmptotal=0;
for (unsigned int k=0; k<sizeData; k++){
tmptotal+=((data_sortby_argkeys[k]-smo[k])*(data_sortby_argkeys[k]-smo[k]))*weights_sortby_argkeys[k];
}
chi2[i]=tmptotal/(sizeData*1.0);
}
double tend=omp_get_wtime();
printf("\nTime main loop: %f", tend - tstart);
for (int i=0; i<numFreq; i++)
{
pgram[i]=(0.5*(chi0-chi2[i])*sizeData)/chi0;
}
// double foundperiodtest=0;
int objIdx=0; // placeholder until we loop over objects
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, &foundPeriod[objIdx]);
printf("\nFound period: %f", foundPeriod[objIdx]);
}
//First step: making one big function except the kernel call
//will call on the CPU first for testing
void supsmu_periodogram_GPU_BatchOneThread(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod)
{
struct lookupObj * objectLookup=NULL;
unsigned int numUniqueObjects;
computeObjectRanges(objectId, &sizeData, &objectLookup, &numUniqueObjects);
pgram=(DTYPE *)malloc(sizeof(DTYPE)*(numFreq)*numUniqueObjects);
foundPeriod=(DTYPE *)malloc(sizeof(DTYPE)*numUniqueObjects);
//Allocate once
DTYPE * chi2=(DTYPE *)malloc(sizeof(DTYPE)*numFreq*numUniqueObjects);
DTYPE * y = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
//compute minimum time
DTYPE minTime=time[0];
for (unsigned int i=0; i<sizeData; i++)
{
if (time[i]<minTime)
{
minTime=time[i];
}
}
DTYPE w0=0.0;
DTYPE tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
tt[i]=time[i]-minTime;
weights[i]=1.0/(error[i]*error[i]);
w0+=weights[i];
tmp+=(data[i]*weights[i]);
}
w0=w0/(sizeData*1.0);
tmp=tmp/(sizeData*1.0);
DTYPE y0=tmp/w0;
std::copy(data, data+sizeData, y);
// DTYPE tmp=0;
// for (int i=0; i<sizeData; i++)
// {
// tmp+=(data[i]*weights[i]);
// }
// tmp=tmp/(sizeData*1.0);
// DTYPE y0=tmp/w0;
tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
y[i]=y[i]-y0;
tmp+=(y[i]*y[i])*weights[i];
}
//
// tmp=0;
// for (int i=0; i<sizeData; i++)
// {
// tmp+=(y[i]*y[i])*weights[i];
// }
DTYPE chi0=tmp/(sizeData*1.0);
//Arrays that need to be allocated for each thread
DTYPE * sc=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*7);
DTYPE * smo=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * t1=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
int * argkeys=(int *)malloc(sizeof(int)*sizeData);
DTYPE * t1_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * data_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * weights_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * dev_sc;
DTYPE * dev_smo;
DTYPE * dev_t1;
// int * dev_argkeys;
DTYPE * dev_t1_sortby_argkeys;
DTYPE * dev_data_sortby_argkeys;
DTYPE * dev_weights_sortby_argkeys;
//allocate memory on the GPU
gpuErrchk(hipMalloc((void**)&dev_sc, sizeof(DTYPE)*(sizeData*7)));
gpuErrchk(hipMalloc((void**)&dev_smo, sizeof(DTYPE)*(sizeData)));
gpuErrchk(hipMalloc((void**)&dev_t1, sizeof(DTYPE)*(sizeData)));
// gpuErrchk(hipMalloc((void**)&dev_argkeys, sizeof(int)*(sizeData)));
gpuErrchk(hipMalloc((void**)&dev_t1_sortby_argkeys, sizeof(DTYPE)*(sizeData)));
gpuErrchk(hipMalloc((void**)&dev_data_sortby_argkeys, sizeof(DTYPE)*(sizeData)));
gpuErrchk(hipMalloc((void**)&dev_weights_sortby_argkeys, sizeof(DTYPE)*(sizeData)));
double tstart=omp_get_wtime();
for (int i=0; i<numFreq; i++)
{
// int tid=omp_get_thread_num();
// //Offsets into arrays for each thread
// unsigned int offset_n=tid*n;
// unsigned int offset_sc=tid*n*8;
DTYPE p=1.0/(minFreq+(deltaf*i));
for (unsigned int j=0; j<sizeData; j++)
{
t1[j]=fmod(tt[j],p)/p;
}
//Do argsort on t1
sortKeyValuePairsIntFloatDouble(argkeys, t1, sizeData);
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1, t1_sortby_argkeys, argkeys, sizeData);
mapArr(data, data_sortby_argkeys, argkeys, sizeData);
mapArr(weights, weights_sortby_argkeys, argkeys, sizeData);
// chi2[i]=supsmu_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, sc+offset_sc, alpha);
//chi2
int iper=1;
DTYPE span=0.0;
//copy data to the GPU
// gpuErrchk(hipMemcpy( dev_timeX, timeX, sizeof(DTYPE)*(*sizeData), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( dev_sc, sc, sizeof(DTYPE)*sizeData*7, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( dev_t1, t1, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice));
// gpuErrchk(hipMemcpy( dev_argkeys, argkeys, sizeof(int)*sizeData, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( dev_t1_sortby_argkeys, t1_sortby_argkeys, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( dev_data_sortby_argkeys, data_sortby_argkeys, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( dev_weights_sortby_argkeys,weights_sortby_argkeys, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice));
//For testing, compute this on the GPU
// supsmu(sizeData, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys, iper, span, alpha, smo, sc);
const int constSizeData=(int)sizeData;
const DTYPE constalpha=alpha; //keep alpha as DTYPE; an int here would silently truncate fractional bass-enhancement values
const DTYPE constspan=span;
const int constiper=iper;
hipLaunchKernelGGL(( supsmukernelOneThread), dim3(1),dim3(1), 0, 0, constSizeData, dev_t1_sortby_argkeys, dev_data_sortby_argkeys, dev_weights_sortby_argkeys, constiper, constspan, constalpha, dev_smo, dev_sc);
gpuErrchk(hipMemcpy( smo, dev_smo, sizeof(DTYPE)*sizeData, hipMemcpyDeviceToHost));
DTYPE tmptotal=0;
for (unsigned int k=0; k<sizeData; k++){
tmptotal+=((data_sortby_argkeys[k]-smo[k])*(data_sortby_argkeys[k]-smo[k]))*weights_sortby_argkeys[k];
}
chi2[i]=tmptotal/(sizeData*1.0);
}
double tend=omp_get_wtime();
printf("\nTime main loop: %f", tend - tstart);
for (int i=0; i<numFreq; i++)
{
pgram[i]=(0.5*(chi0-chi2[i])*sizeData)/chi0;
}
// double foundperiodtest=0;
int objIdx=0; // placeholder until we loop over objects
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, &foundPeriod[objIdx]);
printf("\nFound period: %f", foundPeriod[objIdx]);
}
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
void backToBackSort(int * dev_argkeys, int * dev_freqarr, DTYPE * dev_t1, int sizeData, int numFreq, hipStream_t stream)
{
thrust::device_ptr<int> dev_argkeys_ptr(dev_argkeys);
thrust::device_ptr<DTYPE> dev_t1_ptr(dev_t1);
thrust::device_ptr<int> dev_freqarr_ptr(dev_freqarr);
try{
thrust::stable_sort_by_key(thrust::hip::par.on(stream), dev_t1_ptr, dev_t1_ptr + (sizeData*numFreq),
thrust::make_zip_iterator(thrust::make_tuple(dev_argkeys_ptr, dev_freqarr_ptr)));
thrust::stable_sort_by_key(thrust::hip::par.on(stream), dev_freqarr_ptr, dev_freqarr_ptr + (sizeData*numFreq),
thrust::make_zip_iterator(thrust::make_tuple(dev_argkeys_ptr, dev_t1_ptr)));
}
catch(thrust::system_error e)
{
std::cerr << "Error inside sort: " << e.what() << std::endl;
exit(-1);
}
}
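// Worked sketch of the two-pass segmented sort above (hypothetical 2-frequency,
// 3-point batch). Pass 1 stable-sorts everything by phase t1, dragging (argkeys,
// freqarr) along; pass 2 stable-sorts by frequency id, and stability preserves the
// phase order inside each segment:
//   before: t1 = {.3,.1,.2, .9,.4,.5}, freqarr = {0,0,0,1,1,1}, argkeys = {0,1,2,0,1,2}
//   after : per-frequency segments with t1 ascending in each and argkeys holding the
//           argsort of each segment, e.g. argkeys = {1,2,0, 1,2,0}.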
//GPU supersmoother original with multiple passes
void supsmu_original_single_object(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod, double underestGPUcapacityGiB)
{
double tstartcpu=omp_get_wtime();
int iper=1;
DTYPE span=0.0;
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
const DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
double tendcpu=omp_get_wtime();
printf("\nTime CPU preamble: %f", tendcpu-tstartcpu);
////////////////////////
//for batching the frequencies
//first 0-refers to using original supsmu mode
//second 0-refers to using NUMGPUs when computing the number of batches
unsigned int numBatches=computeNumBatches(0, sizeData, numFreq, underestGPUcapacityGiB, 0);
//upper limit on the number of frequencies in a batch
int numFreqPerBatch=ceil(numFreq*1.0/numBatches*1.0);
printf("\nNumber of batches: %d, Number of frequencies per batch: %d", numBatches, numFreqPerBatch);fflush(stdout);
double tstartcreatestream=omp_get_wtime();
hipStream_t batchstreams[NSTREAMSPERGPU*NUMGPU];
createStreams(batchstreams, NUMGPU, NSTREAMSPERGPU);
double tendcreatestream=omp_get_wtime();
// printf("\nTime to create streams: %f", tendcreatestream - tstartcreatestream);
//End for batching frequencies
////////////////////////
int * dev_freqarr[NUMGPU];
DTYPE * dev_smo[NUMGPU];
DTYPE * dev_t1[NUMGPU];
int * dev_argkeys[NUMGPU];
DTYPE * dev_t1_sortby_argkeys[NUMGPU];
DTYPE * dev_data_sortby_argkeys[NUMGPU];
DTYPE * dev_weights_sortby_argkeys[NUMGPU];
DTYPE * dev_tt[NUMGPU];
DTYPE * dev_data[NUMGPU];
DTYPE * dev_weights[NUMGPU];
DTYPE * dev_sc[NUMGPU];
DTYPE * dev_pgram[NUMGPU];
#pragma omp parallel for num_threads(NUMGPU)
for (int i=0; i<NUMGPU; i++)
{
int globaltid=omp_get_thread_num();
int tid=globaltid%NSTREAMSPERGPU;
int gpuid=globaltid/NSTREAMSPERGPU;
int streamnum=(gpuid*NSTREAMSPERGPU)+tid;
hipSetDevice(i);
//Those that depend on the number of frequencies (not the number per batch)
gpuErrchk(hipMalloc((void**)&dev_pgram[i], sizeof(DTYPE)*numFreq));
//Arrays broken up into batches based on frequency
gpuErrchk(hipMalloc((void**)&dev_freqarr[i], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_argkeys[i], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_sc[i], sizeof(DTYPE)*(sizeData*8*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_smo[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_t1[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_t1_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_data_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_weights_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
//allocate on the GPU
gpuErrchk(hipMalloc((void**)&dev_tt[i], sizeof(DTYPE)*sizeData));
gpuErrchk(hipMalloc((void**)&dev_data[i], sizeof(DTYPE)*sizeData));
gpuErrchk(hipMalloc((void**)&dev_weights[i], sizeof(DTYPE)*sizeData));
//copy to the GPU
gpuErrchk(hipMemcpyAsync( dev_tt[i], tt, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[streamnum]));
gpuErrchk(hipMemcpyAsync( dev_data[i], data, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[streamnum]));
gpuErrchk(hipMemcpyAsync( dev_weights[i], weights, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[streamnum]));
}
//Loop over batches
#pragma omp parallel for num_threads(NUMGPU*NSTREAMSPERGPU)
for (unsigned int i=0; i<numBatches; i++)
{
int globaltid=omp_get_thread_num();
//thread id for a single GPU
int tid=globaltid%NSTREAMSPERGPU;
int gpuid=globaltid/NSTREAMSPERGPU;
uint64_t batchWriteOffset=(uint64_t)i*(uint64_t)numFreqPerBatch;
uint64_t offsetFreqId=(uint64_t)i*(uint64_t)numFreqPerBatch;
int numFreqInBatch=numFreqPerBatch;
int streamOffset=sizeData*numFreqPerBatch*tid;
int streamnum=(gpuid*NSTREAMSPERGPU)+tid;
hipSetDevice(gpuid);
//last batch has fewer frequencies
if((numBatches!=1)&&(i==(numBatches-1)))
{
numFreqInBatch=min(numFreqInBatch,((int)numFreq)-((i)*numFreqPerBatch));
}
printf("\nglobal tid: %d, tid: %d, gpuid: %d, Stream num: %d, Batch Number: %d, number of frequencies: %d",globaltid, tid, gpuid, streamnum, i, numFreqInBatch);
unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
hipLaunchKernelGGL(( computePeriodModFOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, minFreq, offsetFreqId, deltaf, &dev_t1[gpuid][streamOffset], dev_tt[gpuid]);
//Initialize the key arrays
hipLaunchKernelGGL(( initializeKeyArraysOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset], &dev_freqarr[gpuid][streamOffset]);
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
backToBackSort(&dev_argkeys[gpuid][streamOffset], &dev_freqarr[gpuid][streamOffset], &dev_t1[gpuid][streamOffset], sizeData, numFreqInBatch, batchstreams[streamnum]);
//Map the keys based on argkeys
//Separate map and transform
hipLaunchKernelGGL(( mapUsingArgKeysOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset], &dev_data[gpuid][0], &dev_weights[gpuid][0], &dev_t1[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
///////////////////////////////
// Main kernels
///////////////////////////////
//global memory only
#if ORIGINALMODE==0
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
hipLaunchKernelGGL(( supsmukernel), dim3(numBlocks),dim3(SMALLBLOCKSIZE),0,batchstreams[streamnum], numFreqInBatch, sizeData, iper, span, alpha, &dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset],
&dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
//One block per frequency with SM -- use small block size (e.g., 32 threads/block)
#if ORIGINALMODE==1
//Shared memory for x,y,z arrays and other information
const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
hipLaunchKernelGGL(( supsmukernelSMOneFreqBlock), dim3(numFreqInBatch),dim3(SMALLBLOCKSIZE),SMSIZE,batchstreams[streamnum], sizeData, iper, span, alpha, &dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset],
&dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
#if ORIGINALMODE==2
//uses one thread per frequency with shared memory for x,y,z arrays
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
hipLaunchKernelGGL(( supsmukernelSMOneThreadPerFreq), dim3(numBlocks),dim3(SMALLBLOCKSIZE),SMSIZEDATA,batchstreams[streamnum], numFreqInBatch, sizeData, iper, span, alpha,
&dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
//Cascade the execution so that it is robust to running out of shared memory
//Try executing SM kernel 1 thread per freq
//then global memory kernel (which is guaranteed to execute)
#if ORIGINALMODE==-1
printf("\n[CASCADE] Cascade mode, launching SM one thread per frequency");
//First, attempt 1 thread per frequency with SM
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
hipLaunchKernelGGL(( supsmukernelSMOneThreadPerFreq), dim3(numBlocks),dim3(SMALLBLOCKSIZE),SMSIZEDATA,batchstreams[streamnum], numFreqInBatch, sizeData, iper, span, alpha,
&dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
// if (err != hipSuccess)
// {
// printf("\n[CASCADE] Launching SM- 1 block per frequency");
// const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
//hipLaunchKernelGGL(( supsmukernelSMOneFreqBlock), dim3(numFreqInBatch),dim3(SMALLBLOCKSIZE),SMSIZE,batchstreams[streamnum], sizeData, iper, span, alpha, &dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset],
// &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
//execute global memory kernel
// hipError_t err2 = hipPeekAtLastError();
hipError_t err2 = hipGetLastError();
if (err2 != hipSuccess)
{
// std::cout << "\nCUDA error: " << hipGetErrorString(err2);
// printf("\n Launching global memory kernel");
printf("\n[CASCADE] Launching global memory kernel");
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
hipLaunchKernelGGL(( supsmukernel), dim3(numBlocks),dim3(SMALLBLOCKSIZE),0,batchstreams[streamnum], numFreqInBatch, sizeData, iper, span, alpha, &dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset],
&dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
}
// }
#endif
///////////////////////////////
// Main kernels
///////////////////////////////
//Some number of threads per frequency
unsigned int numThreadPerFreq2=8; //must divide evenly into the block size
unsigned int NUMBLOCKS10=ceil((numFreqInBatch*numThreadPerFreq2*1.0)/(LARGEBLOCKSIZE*1.0));
const unsigned int SMSIZE2=sizeof(DTYPE)*(LARGEBLOCKSIZE/numThreadPerFreq2);
hipLaunchKernelGGL(( computePgramReduction), dim3(NUMBLOCKS10), dim3(LARGEBLOCKSIZE), SMSIZE2, batchstreams[streamnum], batchWriteOffset, numThreadPerFreq2, chi0, sizeData, numFreqInBatch, &dev_smo[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset], &dev_pgram[gpuid][0]);
//Copy pgram back to host
gpuErrchk(hipMemcpyAsync(pgram+batchWriteOffset, &dev_pgram[gpuid][batchWriteOffset], sizeof(DTYPE)*numFreqInBatch, hipMemcpyDeviceToHost, batchstreams[streamnum]));
// fprintf(stderr,"\nBatch: %d, write pgram range: %d, %d ",i, batchWriteOffset,batchWriteOffset+numFreqInBatch);
} //end loop over batches
// hipError_t err = hipGetLastError(); // add
// if (err != hipSuccess) std::cout << "CUDA error: " << hipGetErrorString(err) << std::endl; // add
///////////////////////////////
// End main kernels
///////////////////////////////
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, foundPeriod);
printf("\nFound period: %f", *foundPeriod);
double tstartfree=omp_get_wtime();
//free device data
for (int i=0; i<NUMGPU; i++)
{
hipFree(dev_sc[i]);
hipFree(dev_pgram[i]);
hipFree(dev_freqarr[i]);
hipFree(dev_argkeys[i]);
hipFree(dev_smo[i]);
hipFree(dev_t1[i]);
hipFree(dev_t1_sortby_argkeys[i]);
hipFree(dev_data_sortby_argkeys[i]);
hipFree(dev_weights_sortby_argkeys[i]);
hipFree(dev_tt[i]);
hipFree(dev_data[i]);
hipFree(dev_weights[i]);
}
//free host data
free(weights);
free(tt);
double tendfree=omp_get_wtime();
printf("\nTime to free: %f", tendfree - tstartfree);
}
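// Batch bookkeeping sketch for the loop above (hypothetical counts): numFreq = 100 and
// numBatches = 3 give numFreqPerBatch = ceil(100/3) = 34; batches 0 and 1 process 34
// frequencies each and the final batch processes 100 - 2*34 = 32, which is exactly what
// the min(...) clamp computes when i == numBatches-1.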
//Only use a single GPU
void supsmu_original_single_gpu(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod, double underestGPUcapacityGiB, int gpuid)
{
printf("\nObject Id: %u", *objectId);fflush(stdout);
double tstartcpu=omp_get_wtime();
int iper=1;
DTYPE span=0.0;
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
const DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
////////////////////////
//for batching the frequencies
//0-refers to using original supsmu mode
//1- a flag referring to using a single GPU
unsigned int numBatches=computeNumBatches(0, sizeData, numFreq, underestGPUcapacityGiB, 1);
//upper limit on the number of frequencies in a batch
unsigned int numFreqPerBatch=ceil(numFreq*1.0/numBatches*1.0);
printf("\nObject Id: %u, Number of batches: %u, Number of frequencies per batch: %u", *objectId, numBatches, numFreqPerBatch);fflush(stdout);
double tstartcreatestream=omp_get_wtime();
hipStream_t batchstreams[NSTREAMSPERGPU*1];
createStreamsOneGPU(batchstreams, NSTREAMSPERGPU, gpuid);
double tendcreatestream=omp_get_wtime();
// printf("\nTime to create streams: %f", tendcreatestream - tstartcreatestream);
//End for batching frequencies
////////////////////////
int * dev_freqarr[1];
DTYPE * dev_smo[1];
DTYPE * dev_t1[1];
int * dev_argkeys[1];
DTYPE * dev_t1_sortby_argkeys[1];
DTYPE * dev_data_sortby_argkeys[1];
DTYPE * dev_weights_sortby_argkeys[1];
DTYPE * dev_tt[1];
DTYPE * dev_data[1];
DTYPE * dev_weights[1];
DTYPE * dev_sc[1];
DTYPE * dev_pgram[1];
//loop used to be here
hipSetDevice(gpuid);
//Those that depend on the number of frequencies (not the number per batch)
gpuErrchk(hipMalloc((void**)&dev_pgram[0], sizeof(DTYPE)*numFreq));
//Arrays broken up into batches based on frequency
gpuErrchk(hipMalloc((void**)&dev_freqarr[0], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_argkeys[0], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_sc[0], sizeof(DTYPE)*(sizeData*8*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_smo[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_t1[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_t1_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_data_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_weights_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
//allocate on the GPU
gpuErrchk(hipMalloc((void**)&dev_tt[0], sizeof(DTYPE)*sizeData));
gpuErrchk(hipMalloc((void**)&dev_data[0], sizeof(DTYPE)*sizeData));
gpuErrchk(hipMalloc((void**)&dev_weights[0], sizeof(DTYPE)*sizeData));
//copy to the GPU
gpuErrchk(hipMemcpyAsync( dev_tt[0], tt, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[0]));
gpuErrchk(hipMemcpyAsync( dev_data[0], data, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[0]));
gpuErrchk(hipMemcpyAsync( dev_weights[0], weights, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[0]));
//Loop over batches
#pragma omp parallel for num_threads(NSTREAMSPERGPU)
for (unsigned int i=0; i<numBatches; i++)
{
hipSetDevice(gpuid);
int globaltid=omp_get_thread_num();
//thread id for a single GPU
int tid=globaltid%NSTREAMSPERGPU;
uint64_t batchWriteOffset=(uint64_t)i*(uint64_t)numFreqPerBatch;
uint64_t offsetFreqId=(uint64_t)i*(uint64_t)numFreqPerBatch;
unsigned int numFreqInBatch=numFreqPerBatch;
unsigned int streamOffset=sizeData*numFreqPerBatch*tid;
int streamnum=tid;
//last batch has fewer frequencies
if((numBatches!=1)&&(i==(numBatches-1)))
{
numFreqInBatch=min(numFreqInBatch,((int)numFreq)-((i)*numFreqPerBatch));
}
printf("\nglobal tid: %d, tid: %d, gpuid: %d, Stream num: %u, Batch Number: %u, number of frequencies: %u",globaltid, tid, gpuid, streamnum, i, numFreqInBatch);
unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
hipLaunchKernelGGL(( computePeriodModFOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, minFreq, offsetFreqId, deltaf, &dev_t1[0][streamOffset], dev_tt[0]);
//Initialize the key arrays
hipLaunchKernelGGL(( initializeKeyArraysOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset], &dev_freqarr[0][streamOffset]);
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
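//Illustration (two frequencies, three points each), stable sorts assumed:
// t1 = [.7 .2 .5 | .4 .9 .1], freqarr = [0 0 0 | 1 1 1]
//pass 1 orders argkeys by phase t1 (the segments get interleaved);
//pass 2 re-sorts by freqarr, and stability restores contiguous per-frequency
//segments whose argkeys remain ordered by phase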
backToBackSort(&dev_argkeys[0][streamOffset], &dev_freqarr[0][streamOffset], &dev_t1[0][streamOffset], sizeData, numFreqInBatch, batchstreams[streamnum]);
//Map the keys based on argkeys
//Separate map and transform
hipLaunchKernelGGL(( mapUsingArgKeysOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset], &dev_data[0][0], &dev_weights[0][0], &dev_t1[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
///////////////////////////////
// Main kernels
///////////////////////////////
//global memory only
#if ORIGINALMODE==0
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
hipLaunchKernelGGL(( supsmukernel), dim3(numBlocks),dim3(SMALLBLOCKSIZE),0,batchstreams[streamnum], numFreqInBatch, sizeData, iper, span, alpha, &dev_smo[0][streamOffset], &dev_sc[0][streamOffset],
&dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
//One block per frequency with SM -- use small block size (e.g., 32 threads/block)
#if ORIGINALMODE==1
//Shared memory for x,y,z arrays and other information
const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
hipLaunchKernelGGL(( supsmukernelSMOneFreqBlock), dim3(numFreqInBatch),dim3(SMALLBLOCKSIZE),SMSIZE,batchstreams[streamnum], sizeData, iper, span, alpha, &dev_smo[0][streamOffset], &dev_sc[0][streamOffset],
&dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
#if ORIGINALMODE==2
//uses one thread per frequency with shared memory for x,y,z arrays
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
hipLaunchKernelGGL(( supsmukernelSMOneThreadPerFreq), dim3(numBlocks),dim3(SMALLBLOCKSIZE),SMSIZEDATA,batchstreams[streamnum], numFreqInBatch, sizeData, iper, span, alpha,
&dev_smo[0][streamOffset], &dev_sc[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
//Cascade the execution so that it is robust to running out of shared memory
//Try executing SM kernel 1 thread per freq,
//then global memory kernel (which is guaranteed to execute)
#if ORIGINALMODE==-1
printf("\nCascade mode");
//First, attempt 1 thread per frequency with SM
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
hipLaunchKernelGGL(( supsmukernelSMOneThreadPerFreq), dim3(numBlocks),dim3(SMALLBLOCKSIZE),SMSIZEDATA,batchstreams[streamnum], numFreqInBatch, sizeData, iper, span, alpha,
&dev_smo[0][streamOffset], &dev_sc[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
// hipError_t err = hipGetLastError();
// if (err != hipSuccess)
// {
// const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
// hipLaunchKernelGGL(( supsmukernelSMOneFreqBlock), dim3(numFreqInBatch),dim3(SMALLBLOCKSIZE),SMSIZE,batchstreams[streamnum], sizeData, iper, span, alpha, &dev_smo[0][streamOffset], &dev_sc[0][streamOffset],
// &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
//execute global memory kernel
// hipError_t err2 = hipPeekAtLastError();
hipError_t err2 = hipGetLastError();
if (err2 != hipSuccess)
{
// std::cout << "\nCUDA error: " << hipGetErrorString(err2);
printf("\n Launching global memory kernel");
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
hipLaunchKernelGGL(( supsmukernel), dim3(numBlocks),dim3(SMALLBLOCKSIZE),0,batchstreams[streamnum], numFreqInBatch, sizeData, iper, span, alpha, &dev_smo[0][streamOffset], &dev_sc[0][streamOffset],
&dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
}
// }
#endif
///////////////////////////////
// Main kernels
///////////////////////////////
//Some number of threads per frequency
unsigned int numThreadPerFreq2=8; //must divide evenly into the block size
unsigned int NUMBLOCKS10=ceil((numFreqInBatch*numThreadPerFreq2*1.0)/(LARGEBLOCKSIZE*1.0));
const unsigned int SMSIZE2=sizeof(DTYPE)*(LARGEBLOCKSIZE/numThreadPerFreq2);
hipLaunchKernelGGL(( computePgramReduction), dim3(NUMBLOCKS10), dim3(LARGEBLOCKSIZE), SMSIZE2, batchstreams[streamnum], batchWriteOffset, numThreadPerFreq2, chi0, sizeData, numFreqInBatch, &dev_smo[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset], &dev_pgram[0][0]);
//Copy pgram back to host
gpuErrchk(hipMemcpyAsync(pgram+batchWriteOffset, &dev_pgram[0][batchWriteOffset], sizeof(DTYPE)*numFreqInBatch, hipMemcpyDeviceToHost, batchstreams[streamnum]));
// fprintf(stderr,"\nBatch: %d, write pgram range: %d, %d ",i, batchWriteOffset,batchWriteOffset+numFreqInBatch);
} //end loop over batches
// hipError_t err = hipGetLastError(); // add
// if (err != hipSuccess) std::cout << "CUDA error: " << hipGetErrorString(err) << std::endl; // add
///////////////////////////////
// End main kernels
///////////////////////////////
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, foundPeriod);
printf("\nFound period: %f", *foundPeriod);
// //free device data
// for (int i=0; i<NUMGPU; i++)
// {
double tstartfree=omp_get_wtime();
hipFree(dev_sc[0]);
hipFree(dev_pgram[0]);
hipFree(dev_freqarr[0]);
hipFree(dev_argkeys[0]);
hipFree(dev_smo[0]);
hipFree(dev_t1[0]);
hipFree(dev_t1_sortby_argkeys[0]);
hipFree(dev_data_sortby_argkeys[0]);
hipFree(dev_weights_sortby_argkeys[0]);
hipFree(dev_tt[0]);
hipFree(dev_data[0]);
hipFree(dev_weights[0]);
// }
destroyStreamsOneGPU(batchstreams, NSTREAMSPERGPU, gpuid);
//free host data
free(weights);
free(tt);
double tendfree=omp_get_wtime();
printf("\nTime to free: %f", tendfree - tstartfree);
}
void compute_chi0_tt_weights(unsigned int sizeData, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE * chi0, DTYPE * tt, DTYPE * weights)
{
// DTYPE * y = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE y=0;
//compute minimum time
DTYPE minTime=time[0];
for (unsigned int i=0; i<sizeData; i++)
{
if (time[i]<minTime)
{
minTime=time[i];
}
}
DTYPE w0=0.0;
DTYPE tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
tt[i]=time[i]-minTime;
weights[i]=1.0/(error[i]*error[i]);
w0+=weights[i];
tmp+=(data[i]*weights[i]);
}
w0=w0/(sizeData*1.0);
tmp=tmp/(sizeData*1.0);
DTYPE y0=tmp/w0;
tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
y=data[i]-y0;
tmp+=(y*y)*weights[i];
}
*chi0=tmp/(sizeData*1.0);
}
double getGPUCapacity()
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
//Read the global memory capacity from the device.
size_t freebytes=0, globalmembytes=0;
gpuErrchk(hipMemGetInfo(&freebytes,&globalmembytes));
double totalcapacityGiB=globalmembytes*1.0/(1024*1024*1024.0);
printf("\n[Device name: %s, Detecting GPU Global Memory Capacity] Size in GiB: %f", prop.name, totalcapacityGiB);
double underestcapacityGiB=totalcapacityGiB*BETA;
printf("\n[Underestimating GPU Global Memory Capacity (BETA: %f)] Size in GiB: %f", BETA, underestcapacityGiB);
return underestcapacityGiB;
}
double computedeltaf(lookupObj * objectLookup, DTYPE * time, unsigned int numUniqueObjects)
{
//Find the maximum time span for all objects
double maxTimeSpan=0;
#pragma omp parallel for reduction(max: maxTimeSpan)
for (unsigned int i=0; i<numUniqueObjects; i++)
{
unsigned int idxMin=objectLookup[i].idxMin;
unsigned int idxMax=objectLookup[i].idxMax;
double timeSpan=time[idxMax]-time[idxMin];
if (maxTimeSpan<timeSpan)
{
maxTimeSpan=timeSpan;
}
}
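//Heuristic frequency step: one tenth of the natural resolution 1/T, where T is the
//longest time span over all objects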
double df=0.1/maxTimeSpan;
return df;
}
//mode-0 original supsmu
//mode-1 single pass supsmu
void supsmu_gpu_batch(const bool mode, unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, unsigned int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE ** pgram, DTYPE * sumPeriods)
{
double tstartpreamble=omp_get_wtime();
//get the global memory capacity of the GPU and then underestimate it so that we don't have out of memory errors
double underestGPUcapacityGiB=getGPUCapacity();
//For doing batch processing
struct lookupObj * objectLookup=NULL;
unsigned int numUniqueObjects;
computeObjectRanges(objectId, &sizeData, &objectLookup, &numUniqueObjects);
//utility function: compute deltaf
// double deltaf=computedeltaf(objectLookup, time, numUniqueObjects);
// printf("\nDelta f: %f", deltaf);
//allocate memory for the pgram
#ifndef PYTHON
*pgram=(DTYPE *)malloc(sizeof(DTYPE)*(uint64_t)numFreq*(uint64_t)numUniqueObjects);
#endif
// printf("\nPgram GiB: %f", (sizeof(DTYPE)*numFreq*numUniqueObjects*1.0)/(1024*1024*1024.0));
// *pgram=(DTYPE *)calloc((unsigned int)numFreq*numUniqueObjects,sizeof(DTYPE));
DTYPE * periods=(DTYPE *)malloc(sizeof(DTYPE)*numUniqueObjects);
// DTYPE * periods=(DTYPE *)calloc(numUniqueObjects,sizeof(DTYPE));
//number of objects skipped because they didn't have enough observations
unsigned int countSkippedObjectsThresh=0;
//Computing SS is parallelized as follows:
//1) If you are computing a single object, parallelize the object across multiple GPUs
//2) If you are computing a batch of objects, execute a single object per GPU (assuming you are using multiple GPUs)
double tendpreamble=omp_get_wtime();
printf("\nPreabmble before calling main function: %f", tendpreamble - tstartpreamble);
//1) single object-- parallelize single object on multiple GPUs
if (numUniqueObjects==1)
{
unsigned int idxMin=objectLookup[0].idxMin;
unsigned int idxMax=objectLookup[0].idxMax;
unsigned int sizeDataForObject=idxMax-idxMin+1;
uint64_t pgramOffset=0;
DTYPE foundPeriod;
if (sizeDataForObject>=OBSTHRESH)
{
//original
if (mode==0)
{
supsmu_original_single_object(objectId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB);
}
//single pass
if (mode==1)
{
supsmu_singlepass_single_object(objectId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB);
}
periods[0]=foundPeriod;
}
else
{
periods[0]=0.0;
countSkippedObjectsThresh++;
}
}
//2) multiple objects -- parallelize one object per GPU
//dynamic scheduling since time series are different lengths
else
{
#pragma omp parallel for schedule(dynamic) num_threads(NUMGPU) reduction(+:countSkippedObjectsThresh)
for (unsigned int i=0; i<numUniqueObjects; i++)
{
unsigned int idxMin=objectLookup[i].idxMin;
unsigned int idxMax=objectLookup[i].idxMax;
unsigned int sizeDataForObject=idxMax-idxMin+1;
uint64_t pgramOffset=(uint64_t)i*(uint64_t)numFreq;
DTYPE foundPeriod;
int tid=omp_get_thread_num();
//only process objects with at least OBSTHRESH data points
if(sizeDataForObject>=OBSTHRESH)
{
//original supsmu
if (mode==0)
{
//could parallelize the batch of objects by parallelizing each object individually
// supsmu_original_single_object(objectId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB);
supsmu_original_single_gpu(&objectLookup[i].objId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB, tid);
}
//single pass supsmu
if (mode==1)
{
supsmu_singlepass_single_gpu(&objectLookup[i].objId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB, tid);
}
periods[i]=foundPeriod;
}
//too few data points to compute the periods
else
{
countSkippedObjectsThresh++;
periods[i]=0.0;
}
} //end parallel for loop
} //end if statement around unique objects
printf("\nNumber of objects skipped because they didn't have %d observations: %u", OBSTHRESH, countSkippedObjectsThresh);
for (unsigned int i=0; i<numUniqueObjects; i++)
{
*sumPeriods+=periods[i];
}
///////////////////////
//Output
//print found periods to stdout
#if PRINTPERIODS==1
outputPeriodsToStdout(objectLookup, numUniqueObjects, periods);
#endif
//print found periods to file
#if PRINTPERIODS==2
outputPeriodsToFile(objectLookup, numUniqueObjects, periods);
#endif
//Output pgram to file
#if PRINTPGRAM==1
outputPgramToFile(objectLookup, numUniqueObjects, numFreq, pgram);
#endif
//End output
///////////////////////
free(periods);
free(objectLookup);
}
void createStreams(hipStream_t * streams, unsigned int num_gpus, unsigned int streams_per_gpu)
{
// #pragma omp parallel for num_threads(num_gpus)
for (unsigned int i=0; i<num_gpus; i++)
{
//set device
hipSetDevice(i);
//create stream for the device
for (unsigned int j=0; j<streams_per_gpu; j++)
{
hipStreamCreate(&streams[(i*streams_per_gpu)+j]);
}
}
}
void destroyStreamsOneGPU(hipStream_t * streams, unsigned int streams_per_gpu, int gpuid)
{
//set device
hipSetDevice(gpuid);
//create stream for the device
for (unsigned int i=0; i<streams_per_gpu; i++)
{
hipStreamDestroy(streams[i]);
}
}
void createStreamsOneGPU(hipStream_t * streams, unsigned int streams_per_gpu, int gpuid)
{
//set device
hipSetDevice(gpuid);
//create stream for the device
for (unsigned int i=0; i<streams_per_gpu; i++)
{
hipStreamCreate(&streams[i]);
}
}
//GPU supersmoother with single pass
//Processes a single object on a single GPU (gpuid)
void supsmu_singlepass_single_gpu(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod, double underestGPUcapacityGiB, int gpuid)
{
double tstartcpu=omp_get_wtime();
// int iper=1;
// DTYPE span=0.0;
//Allocate host memory
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
const DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
double tendcpu=omp_get_wtime();
printf("\nCPU preamble time: %f", tendcpu - tstartcpu);
double tstartGPUPreamble=omp_get_wtime();
////////////////////////
//for batching the frequencies (not objects)
//1- mode single pass
//1- compute assuming a single GPU
unsigned int numBatches=computeNumBatches(1, sizeData, numFreq, underestGPUcapacityGiB, 1);
//upper limit on the number of frequencies in a batch
int numFreqPerBatch=ceil(numFreq*1.0/numBatches*1.0);
printf("\nNumber of batches: %d, Number of frequencies per batch: %d", numBatches, numFreqPerBatch);
double tstartcreatestream=omp_get_wtime();
hipStream_t batchstreams[NSTREAMSPERGPU*1];
createStreamsOneGPU(batchstreams, NSTREAMSPERGPU, gpuid);
double tendcreatestream=omp_get_wtime();
// printf("\nTime to create streams: %f", tendcreatestream - tstartcreatestream);
//End for batching frequencies
////////////////////////
//Device variables
int * dev_freqarr[1];
DTYPE * dev_smo[1];
DTYPE * dev_t1[1];
int * dev_argkeys[1];
DTYPE * dev_t1_sortby_argkeys[1];
DTYPE * dev_data_sortby_argkeys[1];
DTYPE * dev_weights_sortby_argkeys[1];
DTYPE * dev_tt[1];
DTYPE * dev_data[1];
DTYPE * dev_weights[1];
DTYPE * dev_pgram[1];
//loop used to be here
hipSetDevice(gpuid);
//Those that depend on the number of frequencies (not the number per batch)
gpuErrchk(hipMalloc((void**)&dev_pgram[0], sizeof(DTYPE)*numFreq));
//Arrays broken up into batches based on frequency
gpuErrchk(hipMalloc((void**)&dev_freqarr[0], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_argkeys[0], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_smo[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_t1[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_t1_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_data_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_weights_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
//allocate on the GPU
gpuErrchk(hipMalloc((void**)&dev_tt[0], sizeof(DTYPE)*sizeData));
gpuErrchk(hipMalloc((void**)&dev_data[0], sizeof(DTYPE)*sizeData));
gpuErrchk(hipMalloc((void**)&dev_weights[0], sizeof(DTYPE)*sizeData));
//copy to the GPU
gpuErrchk(hipMemcpyAsync( dev_tt[0], tt, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[0]));
gpuErrchk(hipMemcpyAsync( dev_data[0], data, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[0]));
gpuErrchk(hipMemcpyAsync( dev_weights[0], weights, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[0]));
// double tend_GPUinit=omp_get_wtime();
// printf("\nTime initializing al GPUs: %f", tend_GPUinit-tstart_GPUinit);
double tendGPUPreamble=omp_get_wtime();
printf("\nTime GPU preamble: %f", tendGPUPreamble - tstartGPUPreamble);
double tstartmainloop=omp_get_wtime();
//Loop over Batches
#pragma omp parallel for num_threads(NSTREAMSPERGPU)
for (unsigned int i=0; i<numBatches; i++)
{
hipSetDevice(gpuid);
int globaltid=omp_get_thread_num();
//thread id for a single GPU
int tid=globaltid%NSTREAMSPERGPU;
uint64_t batchWriteOffset=(uint64_t)i*(uint64_t)numFreqPerBatch;
uint64_t offsetFreqId=(uint64_t)i*(uint64_t)numFreqPerBatch;
int numFreqInBatch=numFreqPerBatch;
int streamOffset=sizeData*numFreqPerBatch*tid;
int streamnum=tid;
//last batch has fewer frequencies
if((numBatches!=1)&&(i==(numBatches-1)))
{
numFreqInBatch=min(numFreqInBatch,((int)numFreq)-((i)*numFreqPerBatch));
}
printf("\nglobal tid: %d, tid: %d, gpuid: %d, Stream num: %d, Batch Number: %d, number of frequencies: %d",globaltid, tid, gpuid, streamnum, i, numFreqInBatch);
unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
hipLaunchKernelGGL(( computePeriodModFOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, minFreq, offsetFreqId, deltaf, &dev_t1[0][streamOffset], dev_tt[0]);
//Initialize the key arrays
hipLaunchKernelGGL(( initializeKeyArraysOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset], &dev_freqarr[0][streamOffset]);
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
backToBackSort(&dev_argkeys[0][streamOffset], &dev_freqarr[0][streamOffset], &dev_t1[0][streamOffset], sizeData, numFreqInBatch, batchstreams[streamnum]);
//Map the keys based on argkeys
//Separate map and transform
#if COALESCED==0 || SINGLEPASSMODE==1 || SINGLEPASSMODE==2
hipLaunchKernelGGL(( mapUsingArgKeysOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset], &dev_data[0][0], &dev_weights[0][0], &dev_t1[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
//combine map and transform for coalesced memory accesses for global memory kernel
#if SINGLEPASSMODE==0 && COALESCED==1
hipLaunchKernelGGL(( mapUsingArgKeysOneThreadPerUpdateAndReorderCoalesced), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset],
&dev_data[0][0], &dev_weights[0][0],
&dev_t1[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
///////////////////////////////
// Main kernels
///////////////////////////////
//global memory only
#if SINGLEPASSMODE==0
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
#if COALESCED==0
hipLaunchKernelGGL(( supsmukernelSinglePassGlobalMemory), dim3(numBlocks),dim3(SMALLBLOCKSIZE), 0,batchstreams[streamnum], numFreqInBatch, sizeData, alpha, &dev_smo[0][streamOffset],
&dev_tt[0][0], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
#if COALESCED==1
hipLaunchKernelGGL(( supsmukernelSinglePassGlobalMemoryCoalesced), dim3(numBlocks),dim3(SMALLBLOCKSIZE), 0,batchstreams[streamnum], numFreqInBatch, sizeData, alpha, &dev_smo[0][streamOffset],
&dev_tt[0][0], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
#endif
//One block per frequency with SM -- use small block size (e.g., 32 threads/block)
#if SINGLEPASSMODE==1
//Shared memory for x,y,z arrays
const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
hipLaunchKernelGGL(( supsmukernelSinglePassSMOneBlockPerFreq), dim3(numFreqInBatch),dim3(SMALLBLOCKSIZE),SMSIZE,batchstreams[streamnum], sizeData, alpha, &dev_smo[0][streamOffset],
&dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
#if SINGLEPASSMODE==2
//uses one thread per frequency with shared memory for x,y,z arrays
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
hipLaunchKernelGGL(( supsmukernelSinglePassSMOneThreadPerFreq), dim3(numBlocks),dim3(SMALLBLOCKSIZE),SMSIZEDATA,batchstreams[streamnum], numFreqInBatch, sizeData,
alpha, &dev_smo[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset],
&dev_weights_sortby_argkeys[0][streamOffset]);
#endif
//Some number of threads per frequency
unsigned int numThreadPerFreq2=8; //must divide evenly into the block size
unsigned int NUMBLOCKS10=ceil((numFreqInBatch*numThreadPerFreq2*1.0)/(LARGEBLOCKSIZE*1.0));
const unsigned int SMSIZE2=sizeof(DTYPE)*(LARGEBLOCKSIZE/numThreadPerFreq2);
#if COALESCED==0 || SINGLEPASSMODE==1 || SINGLEPASSMODE==2
hipLaunchKernelGGL(( computePgramReduction), dim3(NUMBLOCKS10), dim3(LARGEBLOCKSIZE), SMSIZE2, batchstreams[streamnum], batchWriteOffset, numThreadPerFreq2, chi0, sizeData, numFreqInBatch, &dev_smo[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset], &dev_pgram[0][0]);
#endif
#if SINGLEPASSMODE==0 && COALESCED==1
hipLaunchKernelGGL(( computePgramReductionCoalesced), dim3(NUMBLOCKS10), dim3(LARGEBLOCKSIZE), SMSIZE2, batchstreams[streamnum], batchWriteOffset, numThreadPerFreq2, chi0, sizeData,
numFreqInBatch, &dev_smo[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset], &dev_pgram[0][0]);
#endif
//Copy pgram back to host
gpuErrchk(hipMemcpyAsync(pgram+batchWriteOffset, &dev_pgram[0][batchWriteOffset], sizeof(DTYPE)*numFreqInBatch, hipMemcpyDeviceToHost, batchstreams[streamnum]));
// printf("\nBatch: %d, write pgram range: %d, %d ",i, batchWriteOffset,batchWriteOffset+numFreqInBatch);
} //end loop over batches
double tendmainloop=omp_get_wtime();
printf("\nTime main loop: %f",tendmainloop - tstartmainloop);
///////////////////////////////
// End main kernels
///////////////////////////////
double tstartperiod=omp_get_wtime();
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, foundPeriod);
double tendperiod=omp_get_wtime();
printf("\nObject id: %d, Found period: %f", *objectId, *foundPeriod);
printf("\nTime to compute period: %f", tendperiod - tstartperiod);
double tstartfree=omp_get_wtime();
//free device data
hipFree(dev_pgram[0]);
hipFree(dev_freqarr[0]);
hipFree(dev_argkeys[0]);
hipFree(dev_smo[0]);
hipFree(dev_t1[0]);
hipFree(dev_t1_sortby_argkeys[0]);
hipFree(dev_data_sortby_argkeys[0]);
hipFree(dev_weights_sortby_argkeys[0]);
hipFree(dev_tt[0]);
hipFree(dev_data[0]);
hipFree(dev_weights[0]);
destroyStreamsOneGPU(batchstreams, NSTREAMSPERGPU, gpuid);
//free host data
free(weights);
free(tt);
double tendfree=omp_get_wtime();
printf("\nTime to free: %f", tendfree - tstartfree);
}
//GPU supersmoother with single pass
//Processes a single object potentially with multiple GPUs
void supsmu_singlepass_single_object(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod, double underestGPUcapacityGiB)
{
double tstartcpu=omp_get_wtime();
// int iper=1;
// DTYPE span=0.0;
//Allocate host memory
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
const DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
double tendcpu=omp_get_wtime();
printf("\nCPU preamble time: %f", tendcpu - tstartcpu);
double tstartGPUPreamble=omp_get_wtime();
////////////////////////
//for batching the frequencies (not objects)
//1- mode single pass
//0- compute all batches assuming using all GPUs
unsigned int numBatches=computeNumBatches(1, sizeData, numFreq, underestGPUcapacityGiB, 0);
//upper limit on the number of frequencies in a batch
int numFreqPerBatch=ceil(numFreq*1.0/numBatches*1.0);
printf("\nNumber of batches: %d, Number of frequencies per batch: %d", numBatches, numFreqPerBatch);
double tstartcreatestream=omp_get_wtime();
hipStream_t batchstreams[NSTREAMSPERGPU*NUMGPU];
createStreams(batchstreams, NUMGPU, NSTREAMSPERGPU);
double tendcreatestream=omp_get_wtime();
// printf("\nTime to create streams: %f", tendcreatestream - tstartcreatestream);
//End for batching frequencies
////////////////////////
//Device variables
int * dev_freqarr[NUMGPU];
DTYPE * dev_smo[NUMGPU];
DTYPE * dev_t1[NUMGPU];
int * dev_argkeys[NUMGPU];
DTYPE * dev_t1_sortby_argkeys[NUMGPU];
DTYPE * dev_data_sortby_argkeys[NUMGPU];
DTYPE * dev_weights_sortby_argkeys[NUMGPU];
DTYPE * dev_tt[NUMGPU];
DTYPE * dev_data[NUMGPU];
DTYPE * dev_weights[NUMGPU];
DTYPE * dev_pgram[NUMGPU];
// double tstart_GPUinit=omp_get_wtime();
// printf("\nParallelize hipMalloc for each GPU later");
//Allocate memory and copy data to each GPU
#pragma omp parallel for num_threads(NUMGPU)
for (int i=0; i<NUMGPU; i++)
{
int globaltid=omp_get_thread_num();
int tid=globaltid%NSTREAMSPERGPU;
int gpuid=globaltid/NSTREAMSPERGPU;
int streamnum=(gpuid*NSTREAMSPERGPU)+tid;
hipSetDevice(i);
//Those that depend on the number of frequencies (not the number per batch)
gpuErrchk(hipMalloc((void**)&dev_pgram[i], sizeof(DTYPE)*numFreq));
//Arrays broken up into batches based on frequency
gpuErrchk(hipMalloc((void**)&dev_freqarr[i], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_argkeys[i], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_smo[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_t1[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_t1_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_data_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(hipMalloc((void**)&dev_weights_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
//allocate on the GPU
gpuErrchk(hipMalloc((void**)&dev_tt[i], sizeof(DTYPE)*sizeData));
gpuErrchk(hipMalloc((void**)&dev_data[i], sizeof(DTYPE)*sizeData));
gpuErrchk(hipMalloc((void**)&dev_weights[i], sizeof(DTYPE)*sizeData));
//copy to the GPU
gpuErrchk(hipMemcpyAsync( dev_tt[i], tt, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[streamnum]));
gpuErrchk(hipMemcpyAsync( dev_data[i], data, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[streamnum]));
gpuErrchk(hipMemcpyAsync( dev_weights[i], weights, sizeof(DTYPE)*sizeData, hipMemcpyHostToDevice, batchstreams[streamnum]));
}
// double tend_GPUinit=omp_get_wtime();
// printf("\nTime initializing al GPUs: %f", tend_GPUinit-tstart_GPUinit);
double tendGPUPreamble=omp_get_wtime();
printf("\nTime GPU preamble: %f", tendGPUPreamble - tstartGPUPreamble);
double tstartmainloop=omp_get_wtime();
//Loop over Batches
#pragma omp parallel for num_threads(NUMGPU*NSTREAMSPERGPU)
for (unsigned int i=0; i<numBatches; i++)
{
int globaltid=omp_get_thread_num();
//thread id for a single GPU
int tid=globaltid%NSTREAMSPERGPU;
int gpuid=globaltid/NSTREAMSPERGPU;
uint64_t batchWriteOffset=(uint64_t)i*(uint64_t)numFreqPerBatch;
uint64_t offsetFreqId=(uint64_t)i*(uint64_t)numFreqPerBatch;
int numFreqInBatch=numFreqPerBatch;
int streamOffset=sizeData*numFreqPerBatch*tid;
int streamnum=(gpuid*NSTREAMSPERGPU)+tid;
hipSetDevice(gpuid);
//last batch has fewer frequencies
if((numBatches!=1)&&(i==(numBatches-1)))
{
numFreqInBatch=min(numFreqInBatch,((int)numFreq)-((i)*numFreqPerBatch));
}
printf("\nglobal tid: %d, tid: %d, gpuid: %d, Stream num: %d, Batch Number: %d, number of frequencies: %d",globaltid, tid, gpuid, streamnum, i, numFreqInBatch);
// unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
//hipLaunchKernelGGL(( computePeriodModFOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, minFreqBatch, deltaf, &dev_t1[gpuid][streamOffset], dev_tt[gpuid]);
unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
hipLaunchKernelGGL(( computePeriodModFOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, minFreq, offsetFreqId, deltaf, &dev_t1[gpuid][streamOffset], dev_tt[gpuid]);
//Initialize the key arrays
hipLaunchKernelGGL(( initializeKeyArraysOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset], &dev_freqarr[gpuid][streamOffset]);
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
backToBackSort(&dev_argkeys[gpuid][streamOffset], &dev_freqarr[gpuid][streamOffset], &dev_t1[gpuid][streamOffset], sizeData, numFreqInBatch, batchstreams[streamnum]);
//Map the keys based on argkeys
//Separate map and transform
#if COALESCED==0 || SINGLEPASSMODE==1 || SINGLEPASSMODE==2
hipLaunchKernelGGL(( mapUsingArgKeysOneThreadPerUpdate), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset], &dev_data[gpuid][0], &dev_weights[gpuid][0], &dev_t1[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
//combine map and transform for coalesced memory accesses for global memory kernel
#if SINGLEPASSMODE==0 && COALESCED==1
hipLaunchKernelGGL(( mapUsingArgKeysOneThreadPerUpdateAndReorderCoalesced), dim3(NUMBLOCKSDATAFREQ),dim3(LARGEBLOCKSIZE),0,batchstreams[streamnum], sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset],
&dev_data[gpuid][0], &dev_weights[gpuid][0],
&dev_t1[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
///////////////////////////////
// Main kernels
///////////////////////////////
//global memory only
#if SINGLEPASSMODE==0
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
#if COALESCED==0
hipLaunchKernelGGL(( supsmukernelSinglePassGlobalMemory), dim3(numBlocks),dim3(SMALLBLOCKSIZE), 0,batchstreams[streamnum], numFreqInBatch, sizeData, alpha, &dev_smo[gpuid][streamOffset],
&dev_tt[gpuid][0], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
#if COALESCED==1
hipLaunchKernelGGL(( supsmukernelSinglePassGlobalMemoryCoalesced), dim3(numBlocks),dim3(SMALLBLOCKSIZE), 0,batchstreams[streamnum], numFreqInBatch, sizeData, alpha, &dev_smo[gpuid][streamOffset],
&dev_tt[gpuid][0], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
#endif
//One block per frequency with SM -- use small block size (e.g., 32 threads/block)
#if SINGLEPASSMODE==1
//Shared memory for x,y,z arrays
const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
hipLaunchKernelGGL(( supsmukernelSinglePassSMOneBlockPerFreq), dim3(numFreqInBatch),dim3(SMALLBLOCKSIZE),SMSIZE,batchstreams[streamnum], sizeData, alpha, &dev_smo[gpuid][streamOffset],
&dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
#if SINGLEPASSMODE==2
//uses one thread per frequency with shared memory for x,y,z arrays
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
hipLaunchKernelGGL(( supsmukernelSinglePassSMOneThreadPerFreq), dim3(numBlocks),dim3(SMALLBLOCKSIZE),SMSIZEDATA,batchstreams[streamnum], numFreqInBatch, sizeData,
alpha, &dev_smo[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset],
&dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
//Some number of threads per frequency
unsigned int numThreadPerFreq2=8; //must divide evenly into the block size
unsigned int NUMBLOCKS10=ceil((numFreqInBatch*numThreadPerFreq2*1.0)/(LARGEBLOCKSIZE*1.0));
const unsigned int SMSIZE2=sizeof(DTYPE)*(LARGEBLOCKSIZE/numThreadPerFreq2);
#if COALESCED==0 || SINGLEPASSMODE==1 || SINGLEPASSMODE==2
hipLaunchKernelGGL(( computePgramReduction), dim3(NUMBLOCKS10), dim3(LARGEBLOCKSIZE), SMSIZE2, batchstreams[streamnum], batchWriteOffset, numThreadPerFreq2, chi0, sizeData, numFreqInBatch, &dev_smo[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset], &dev_pgram[gpuid][0]);
#endif
#if SINGLEPASSMODE==0 && COALESCED==1
hipLaunchKernelGGL(( computePgramReductionCoalesced), dim3(NUMBLOCKS10), dim3(LARGEBLOCKSIZE), SMSIZE2, batchstreams[streamnum], batchWriteOffset, numThreadPerFreq2, chi0, sizeData,
numFreqInBatch, &dev_smo[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset], &dev_pgram[gpuid][0]);
#endif
//Copy pgram back to host
gpuErrchk(hipMemcpyAsync(pgram+batchWriteOffset, &dev_pgram[gpuid][batchWriteOffset], sizeof(DTYPE)*numFreqInBatch, hipMemcpyDeviceToHost, batchstreams[streamnum]));
// printf("\nBatch: %d, write pgram range: %d, %d ",i, batchWriteOffset,batchWriteOffset+numFreqInBatch);
} //end loop over batches
double tendmainloop=omp_get_wtime();
printf("\nTime main loop: %f",tendmainloop - tstartmainloop);
///////////////////////////////
// End main kernels
///////////////////////////////
double tstartperiod=omp_get_wtime();
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, foundPeriod);
double tendperiod=omp_get_wtime();
printf("\nFound period: %f", *foundPeriod);
printf("\nTime to compute period: %f", tendperiod - tstartperiod);
double tstartfree=omp_get_wtime();
//free device data
#pragma omp parallel for num_threads(NUMGPU)
for (int i=0; i<NUMGPU; i++)
{
hipFree(dev_pgram[i]);
hipFree(dev_freqarr[i]);
hipFree(dev_argkeys[i]);
hipFree(dev_smo[i]);
hipFree(dev_t1[i]);
hipFree(dev_t1_sortby_argkeys[i]);
hipFree(dev_data_sortby_argkeys[i]);
hipFree(dev_weights_sortby_argkeys[i]);
hipFree(dev_tt[i]);
hipFree(dev_data[i]);
hipFree(dev_weights[i]);
}
//free host data
free(weights);
free(tt);
double tendfree=omp_get_wtime();
printf("\nTime to free: %f", tendfree - tstartfree);
}
//Estimated memory footprint, used to compute the number of batches
//mode-0 is original
//mode-1 is single pass
//pass in the underestimated capacity
//singlegpuflag- 0- use NUMGPU GPUs
//singlegpuflag- 1- use 1 GPU
unsigned int computeNumBatches(bool mode, unsigned int sizeData, unsigned int numFreq, double underestGPUcapacityGiB, bool singlegpuflag)
{
printf("\n*********************");
//Memory footprint assuming FP64 data
//Single pass: sp=[1/(1024**3)]*[(8*Nf)+(3*8*Nt)+(2*4*Nf*Nt)+(5*8*Nf*Nt)+(2*3*8*Nf*Nt)]
//original: sp+(8*8*Nf*Nt) (the 8-column scratch array sc)
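//Illustrative example: Nt=4,000 observations and Nf=10^6 frequencies in FP64 give
//~358 GiB for the single pass variant and ~238 GiB more for the original (scratch sc),
//far beyond one GPU -- hence the frequency batching below.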
double totalGiB=0.0;
//pgram
totalGiB+=sizeof(DTYPE)*numFreq/(1024*1024*1024.0);
//tt, data, weights
totalGiB+=3*sizeof(DTYPE)*sizeData/(1024*1024*1024.0);
//freqArr, argkeys
totalGiB+=2*sizeof(int)*numFreq*sizeData/(1024*1024*1024.0);
//smo, t1, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys,
totalGiB+=5*sizeof(DTYPE)*numFreq*sizeData/(1024*1024*1024.0);
// sorting (out-of-place radix sorting requires an extra n storage, but overestimate 2n because back-to-back may require 2n)
totalGiB+=2*3*sizeof(DTYPE)*numFreq*sizeData/(1024*1024*1024.0);
//account for scratch in original algorithm
if (mode==0)
{
totalGiB+=sizeof(DTYPE)*numFreq*sizeData*8/(1024*1024*1024.0);
}
printf("\nEstimated global memory footprint (GiB): %f", totalGiB);
unsigned int numBatches=ceil(totalGiB/(underestGPUcapacityGiB))*NSTREAMSPERGPU;
printf("\nMinimum number of batches: %u", numBatches);
if (singlegpuflag==0)
{
numBatches=ceil((numBatches*1.0/NUMGPU))*NUMGPU;
printf("\nNumber of batches (after ensuring batches evenly divide %d GPUs): %u", NUMGPU, numBatches);
}
else
{
printf("\nNumber of batches (after ensuring batches evenly divide 1 GPUs): %u", numBatches);
}
printf("\n*********************\n");
return numBatches;
}
void outputPgramToFile(struct lookupObj * objectLookup, unsigned int numUniqueObjects, unsigned int numFreqs, DTYPE ** pgram)
{
char fnameoutput[]="pgram_SS.txt";
printf("\nPrinting the pgram to file: %s", fnameoutput);
ofstream pgramoutput;
pgramoutput.open(fnameoutput,ios::out);
pgramoutput.precision(4);
for (unsigned int i=0; i<numUniqueObjects; i++)
{
pgramoutput<<objectLookup[i].objId<<", ";
for (unsigned int j=0; j<numFreqs; j++)
{
pgramoutput<<(*pgram)[(i*numFreqs)+j]<<", ";
}
pgramoutput<<endl;
}
pgramoutput.close();
}
void outputPeriodsToFile(struct lookupObj * objectLookup, unsigned int numUniqueObjects, DTYPE * foundPeriod)
{
char fnamebestperiods[]="bestperiods_SS.txt";
printf("\nPrinting the best periods to file: %s", fnamebestperiods);
ofstream bestperiodsoutput;
bestperiodsoutput.open(fnamebestperiods,ios::out);
bestperiodsoutput.precision(7);
for (unsigned int i=0; i<numUniqueObjects; i++)
{
bestperiodsoutput<<objectLookup[i].objId<<", "<<foundPeriod[i]<<endl;
}
bestperiodsoutput.close();
}
void outputPeriodsToStdout(struct lookupObj * objectLookup, unsigned int numUniqueObjects, DTYPE * foundPeriod)
{
for (unsigned int i=0; i<numUniqueObjects; i++)
{
printf("\nObject: %d Period: %f, ",objectLookup[i].objId,foundPeriod[i]);
}
}
| c1c1f06dbc7340007a84d51c18ee64ecb49b908f.cu | #include <unistd.h>
#include <istream>
#include <iostream>
#include <sstream>
//Only include parameters file if we're not creating the shared library
#ifndef PYTHON
#include "params.h"
#endif
#include "supsmu.h"
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <numeric>
#include <vector>
#include <fstream>
#include <omp.h>
#include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include "structs.h"
#include "main.h"
#include "kernel.h"
using namespace std;
template <typename T>
std::vector<int> sort_indexes(const std::vector<T> &v) {
// initialize original index locations
std::vector<int> idx(v.size());
iota(idx.begin(), idx.end(), 0);
// sort indexes based on comparing values in v
// using std::stable_sort instead of std::sort
// to avoid unnecessary index re-orderings
// when v contains elements of equal values
stable_sort(idx.begin(), idx.end(),
[&v](int i1, int i2) {return v[i1] < v[i2];});
return idx;
}
//original port from Nat's code before breaking into two separate functions
void supsmu_periodogram(int n, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram)
{
DTYPE deltaf=(maxFreq-minFreq)/numFreq;
//runs supersmoother for folded lightcurves on a frequency grid
//compute minimum time
DTYPE minTime=time[0];
for (int i=0; i<n; i++)
{
if (time[i]<minTime)
{
minTime=time[i];
}
}
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*n);
for (int i=0; i<n; i++)
{
tt[i]=time[i]-minTime;
}
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*n);
for (int i=0; i<n; i++)
{
weights[i]=1.0/(error[i]*error[i]);
}
DTYPE w0=0.0;
for (int i=0; i<n; i++)
{
w0+=weights[i];
}
w0=w0/(n*1.0);
DTYPE * y = (DTYPE *)malloc(sizeof(DTYPE)*n);
std::copy(data, data+n, y);
DTYPE tmp=0;
for (int i=0; i<n; i++)
{
tmp+=(data[i]*weights[i]);
}
tmp=tmp/(n*1.0);
DTYPE y0=tmp/w0;
for (int i=0; i<n; i++)
{
y[i]=y[i]-y0;
}
//
tmp=0;
for (int i=0; i<n; i++)
{
tmp+=(y[i]*y[i])*weights[i];
}
DTYPE chi0=tmp/(n*1.0);
DTYPE * chi2=(DTYPE *)malloc(sizeof(DTYPE)*numFreq);
//Arrays that need to be allocated for each thread
DTYPE * sc=(DTYPE *)malloc(sizeof(DTYPE)*n*8*NTHREADSCPU);
DTYPE * smo=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
DTYPE * t1=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
int * argkeys=(int *)malloc(sizeof(int)*n*NTHREADSCPU);
DTYPE * t1_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
DTYPE * data_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
DTYPE * weights_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*n*NTHREADSCPU);
double tstart=omp_get_wtime();
#pragma omp parallel for num_threads(NTHREADSCPU)
for (int i=0; i<numFreq; i++)
{
int tid=omp_get_thread_num();
//Offsets into arrays for each thread
unsigned int offset_n=tid*n;
unsigned int offset_sc=tid*n*8;
DTYPE p=1.0/(minFreq+(deltaf*i));
for (int j=0; j<n; j++)
{
t1[offset_n+j]=fmod(tt[j],p)/p;
}
//Do argsort on t1
// sortKeyValuePairsIntDouble(argkeys+offset_n, t1+offset_n, n);
sortKeyValuePairsIntFloatDouble(argkeys+offset_n, t1+offset_n, n);
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1+offset_n, t1_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(data, data_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(weights, weights_sortby_argkeys+offset_n, argkeys+offset_n, n);
chi2[i]=supsmu_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, sc+offset_sc, alpha);
}
double tend=omp_get_wtime();
printf("\nTime main loop: %f", tend - tstart);
for (int i=0; i<numFreq; i++)
{
pgram[i]=(0.5*(chi0-chi2[i])*n)/chi0;
}
//free scratch and host working arrays
free(sc); free(smo); free(t1); free(argkeys);
free(t1_sortby_argkeys); free(data_sortby_argkeys); free(weights_sortby_argkeys);
free(chi2); free(y); free(weights); free(tt);
}
//overloaded function for float/doubles
void mapArr(double * inArr, double * outArr, int * keys, int n)
{
for (int i=0; i<n; i++)
{
outArr[i]=inArr[keys[i]];
}
}
//overloaded function for float/doubles
void mapArr(float * inArr, float * outArr, int * keys, int n)
{
for (int i=0; i<n; i++)
{
outArr[i]=inArr[keys[i]];
}
}
//overloaded function for float/doubles
void sortKeyValuePairsIntFloatDouble(int * keys, double * values, int n)
{
std::vector<double>val_vect(values, values+n);
std::vector<int>keys_vect = sort_indexes(val_vect);
std::copy(keys_vect.begin(), keys_vect.end(), keys);
}
//overloaded function for float/doubles
void sortKeyValuePairsIntFloatDouble(int * keys, float * values, int n)
{
std::vector<float>val_vect(values, values+n);
std::vector<int>keys_vect = sort_indexes(val_vect);
std::copy(keys_vect.begin(), keys_vect.end(), keys);
}
//overloaded function for float/doubles
// void sortKeyValuePairsIntDouble(int * keys, double * values, int n)
// {
// std::vector<double>val_vect(values, values+n);
// std::vector<int>keys_vect = sort_indexes(val_vect);
// std::copy(keys_vect.begin(), keys_vect.end(), keys);
// }
//overloaded function for float/doubles
// void sortKeyValuePairsIntFloat(int * keys, float * values, int n)
// {
// std::vector<float>val_vect(values, values+n);
// std::vector<int>keys_vect = sort_indexes(val_vect);
// std::copy(keys_vect.begin(), keys_vect.end(), keys);
// }
DTYPE supsmu_chi2(int n, DTYPE * time, DTYPE * data, DTYPE * weights , DTYPE * smo, DTYPE * sc, DTYPE alpha)
{
//NAT: is iper==1? [yes- means periodic]
int iper=1;
//NAT: is span==0.0? [yes- lets supersmoother work its magic (otherwise uses input span)]
DTYPE span=0.0;
supsmu(n, time, data, weights, iper, span, alpha, smo, sc);
DTYPE tmptotal=0;
for (int i=0; i<n; i++){
tmptotal+=((data[i]-smo[i])*(data[i]-smo[i]))*weights[i];
}
return tmptotal/(n*1.0);
}
DTYPE supsmu_singlepass_chi2(int n, DTYPE * time, DTYPE * data, DTYPE * weights , DTYPE * smo, DTYPE alpha)
{
//NAT: is iper==1? [yes- means periodic]
int iper=1;
//NAT: is span==0.0? [yes- lets supersmoother work its magic (otherwise uses input span)]
DTYPE span=0.0;
supsmusinglepass(n, time, data, weights, iper, span, alpha, smo);
DTYPE tmptotal=0;
for (int i=0; i<n; i++){
tmptotal+=((data[i]-smo[i])*(data[i]-smo[i]))*weights[i];
}
return tmptotal/(n*1.0);
}
//Copied/pasted comments from original fortran code by Friedman
// input:
// n : number of observations (x,y - pairs).
// x(n) : ordered abscissa values.
// y(n) : corresponding ordinate (response) values.
// w(n) : weight for each (x,y) observation.
// iper : periodic variable flag.
// iper=1 => x is ordered interval variable.
// iper=2 => x is a periodic variable with values
// in the range (0.0,1.0) and period 1.0.
// span : smoother span (fraction of observations in window).
// span=0.0 => automatic (variable) span selection.
// alpha : controls high frequency (small span) penalty
// used with automatic span selection (bass tone control).
// (alpha.le.0.0 or alpha.gt.10.0 => no effect.)
// output:
// smo(n) : smoothed ordinate (response) values.
// scratch:
// sc(n,8) : internal working storage (this port uses 8 columns).
int supsmu (int n, DTYPE * x, DTYPE * y, DTYPE * w, int iper, DTYPE span, DTYPE alpha, DTYPE * smo, DTYPE * sc) {
// sc is scratch space (8,n)
// output is smo: smoothed version of y
int i,j,jper;
DTYPE vsmlsq,sw,sy,a,scale,resmin,tmp,f;
// spans to be estimated: tweeter, midrange, and woofer
DTYPE spans[] = {0.05,0.2,0.5};
if (x[n-1]<=x[0]) {
sy=0.0;
sw=sy;
for (j=0;j<n;j++) {
sy=sy+w[j]*y[j];
sw=sw+w[j];
}
a=0.0;
if (sw>0) a=sy/sw;
for (j=0;j<n;j++) smo[j] = a;
return 0;
}
i=n/4-1;
j=3*(i+1)-1;
scale=x[j]-x[i];
//Nat: can be removed
// while (scale<=0) {
// if (j<n-1) j+=1;
// if (i>0) i-=1;
// scale=x[j]-x[i];
// }
vsmlsq=1.e-6*scale*scale;
jper=iper;
if (iper==2 && (x[0]<0 || x[n-1]>1)) jper=1;
if (jper<1 || jper>2) jper=1;
if (span>0) {
smooth (n,x,y,w,span,jper,vsmlsq,smo,sc); // fixed span
return 0;
}
// if we made it here, the span will be estimated and variable
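//Variable-span selection: smooth y with each of the three spans (fits in sc columns
//0,2,4; cross-validated absolute residuals in column 6), smooth those residuals with
//the midrange span (columns 1,3,5), pick per point the span with the smallest
//smoothed residual, pull it toward the woofer span via alpha (bass control), smooth
//the chosen spans, interpolate between the two bracketing fits, and finish with a
//tweeter-span smooth of the blended values.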
for (i=0;i<3;i++) {
smooth (n,x,y,w,spans[i],jper,vsmlsq,sc+2*i*n,sc+6*n);
smooth (n,x,sc+6*n,w,spans[1],-jper,vsmlsq,sc+(2*i+1)*n,sc+7*n);
}
for (j=0;j<n;j++) {
resmin=1.e20;
for (i=0;i<3;i++) {
if (sc[j+(2*i+1)*n]<resmin) {
resmin=sc[j+(2*i+1)*n];
sc[j+6*n]=spans[i];
}
}
if (alpha>0 && alpha<=10 && resmin<sc[j+5*n] && resmin>0) {
tmp = resmin/sc[j+5*n];
if (tmp<1.e-7) tmp=1.e-7;
sc[j+6*n]+=(spans[2]-sc[j+6*n])*pow(tmp,10.0-alpha);
}
}
smooth (n,x,sc+6*n,w,spans[1],-jper,vsmlsq,sc+n,sc+7*n);
for (j=0;j<n;j++) {
if (sc[j+n]<=spans[0]) sc[j+n]=spans[0];
if (sc[j+n]>=spans[2]) sc[j+n]=spans[2];
f=sc[j+n]-spans[1];
if (f<0) {
f/=spans[0]-spans[1];
sc[j+3*n]=(1.0-f)*sc[j+2*n]+f*sc[j];
} else {
f/=spans[2]-spans[1];
sc[j+3*n]=(1.0-f)*sc[j+2*n]+f*sc[j+4*n];
}
}
smooth (n,x,sc+3*n,w,spans[0],-jper,vsmlsq,smo,sc+7*n);
return 0;
}
int smooth (int n, DTYPE * x, DTYPE * y, DTYPE * w, DTYPE span, int iper, DTYPE vsmlsq, DTYPE * smo, DTYPE * acvr) {
int i,j,jper,in,out,ibw,it; //j0,
DTYPE xto,xti;
DTYPE wt,fbo,fbw=0.,xm=0.,ym=0.,tmp,var=0.,cvar=0.,a,h; //,sy
jper=abs(iper);
ibw=0.5*span*n+0.5;
if (ibw<2) ibw=2;
it=2*ibw+1;
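//Build running weighted statistics over the initial window of it=2*ibw+1 points:
//fbw is the total weight, xm/ym the weighted means, var/cvar the weighted
//(co)variances, all updated incrementally as points enter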
for (i=0;i<it;i++) {
j=i;
if (jper==2) j=i-ibw-1;
if (j<0) {
j+=n;
xti=x[j]-1.0;
} else xti=x[j];
wt=w[j];
fbo=fbw;
fbw+=wt;
if (fbw>0) {
xm=(fbo*xm+wt*xti)/fbw;
ym=(fbo*ym+wt*y[j])/fbw;
}
if (fbo>0) {
tmp=fbw*wt*(xti-xm)/fbo;
var+=tmp*(xti-xm);
cvar+=tmp*(y[j]-ym);
}
}
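//Slide the window across all points: remove the trailing point (out) and add the
//leading point (in), wrapping coordinates by +/-1.0 for periodic data, then evaluate
//the local weighted least-squares line at x[j]; for iper>0 also store the absolute
//cross-validated residual in acvr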
for (j=0;j<n;j++) {
out=j-ibw-1;
in=j+ibw;
if (jper==2 || (out>=0 && in<n)) {
if (in>n-1) {
in-=n;
xti=x[in]+1.0;
} else xti=x[in];
if (out<0) {
out+=n;
xto=x[out]-1.0;
} else xto=x[out];
wt=w[out];
fbo=fbw;
fbw-=wt;
if (fbw>0) {
tmp=fbo*wt*(xto-xm)/fbw;
var-=tmp*(xto-xm);
cvar-=tmp*(y[out]-ym);
}
if (fbw>0) {
xm=(fbo*xm-wt*xto)/fbw;
ym=(fbo*ym-wt*y[out])/fbw;
}
wt=w[in];
fbo=fbw;
fbw+=wt;
if (fbw>0) {
xm=(fbo*xm+wt*xti)/fbw;
ym=(fbo*ym+wt*y[in])/fbw;
}
if (fbo>0) {
tmp=fbw*wt*(xti-xm)/fbo;
var+=tmp*(xti-xm);
cvar+=tmp*(y[in]-ym);
}
}
a=0.0;
if (var>vsmlsq) a=cvar/var;
smo[j]=a*(x[j]-xm)+ym;
if (iper>0) {
h=0.0;
if (fbw>0) h=1.0/fbw;
if (var>vsmlsq) h+=(x[j]-xm)*(x[j]-xm)/var;
acvr[j]=0.0;
a=1.0-w[j]*h;
if (a>0) acvr[j]=fabs(y[j]-smo[j])/a;
else if (j>0) acvr[j]=acvr[j-1];
}
}
//Nat: can be removed
// for (j=0;j<n;j++) {
// sy=smo[j]*w[j];
// fbw=w[j];
// j0=j;
// while (j<n-1 && x[j+1]<=x[j]) {
// j+=1;
// sy+=w[j]*smo[j];
// fbw+=w[j];
// }
// if (j>j0) {
// a=0.0;
// if (fbw>0) a=sy/fbw;
// for (i=j0;i<=j;i++) smo[i]=a;
// }
// }
return 0;
}
int supsmusinglepass(int n, DTYPE * x, DTYPE * y, DTYPE * w, int iper, DTYPE span, DTYPE alpha, DTYPE * smo)
{
int ibw[3];
DTYPE vsmlsq,scale;
DTYPE spans[] = {0.05,0.2,0.5};
int i=n/4-1;
int j=3*(i+1)-1;
scale=x[j]-x[i];
vsmlsq=1.e-6*scale*scale;
for (i=0;i<3;i++) {
ibw[i] = (int)( 0.5*spans[i]*n+0.5 );
if (ibw[i]<2) ibw[i]=2;
}
if (alpha<0) alpha=0;
if (alpha>10) alpha=10;
smoothsinglepass(n, ibw, x, y, w, vsmlsq, alpha, smo);
return 0;
}
void smoothsinglepass(int n, int *ibw, DTYPE *x, DTYPE *y, DTYPE *w, DTYPE vsmlsq, int alpha, DTYPE *smo)
{
int i,j,in,out;
DTYPE wt,xto,xti,yto,yti,ibwb,smo0[3],a,f,chi2,chi2m;
DTYPE fbo,fbw[3],xm[3],ym[3],tmp,var[3]={0,0,0},vary=0.,cvar[3]={0,0,0};
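//Maintain three concurrent running windows, one per span half-width ibw[i]; each is
//initialized around point 0 with periodic wrapping (points taken from the end of the
//array have their phases shifted by -1.0)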
for (i=0;i<3;i++) {
j=n-ibw[i]-1;
xm[i]=x[j]-1.0;
ym[i]=y[j];
fbw[i]=w[j];
for (j=n-ibw[i];j<n;j++) {
xti=x[j]-1.0;
yti=y[j];
wt=w[j];
fbo=fbw[i];
fbw[i]+=wt;
xm[i]=(fbo*xm[i]+wt*xti)/fbw[i];
ym[i]=(fbo*ym[i]+wt*yti)/fbw[i];
tmp=fbw[i]*wt*(xti-xm[i])/fbo;
var[i]+=tmp*(xti-xm[i]);
cvar[i]+=tmp*(yti-ym[i]);
if (i==0) vary+=fbw[0]*wt*(yti-ym[0])*(yti-ym[0])/fbo;
}
for (j=0;j<ibw[i];j++) {
xti=x[j];
yti=y[j];
wt=w[j];
fbo=fbw[i];
fbw[i]+=wt;
xm[i]=(fbo*xm[i]+wt*xti)/fbw[i];
ym[i]=(fbo*ym[i]+wt*yti)/fbw[i];
tmp=fbw[i]*wt*(xti-xm[i])/fbo;
var[i]+=tmp*(xti-xm[i]);
cvar[i]+=tmp*(yti-ym[i]);
if (i==0) vary+=fbw[0]*wt*(yti-ym[0])*(yti-ym[0])/fbo;
}
}
for (j=0;j<n;j++) {
for (i=0;i<3;i++) {
out=j-ibw[i]-1;
in=j+ibw[i];
if (in>n-1) {
in-=n;
xti=x[in]+1.0;
} else xti=x[in];
if (out<0) {
out+=n;
xto=x[out]-1.0;
} else xto=x[out];
yti=y[in];
yto=y[out];
wt=w[out];
fbo=fbw[i];
fbw[i]-=wt;
tmp=fbo*wt*(xto-xm[i])/fbw[i];
var[i]-=tmp*(xto-xm[i]);
cvar[i]-=tmp*(yto-ym[i]);
if (i==0) vary-=fbo*wt*(yto-ym[0])*(yto-ym[0])/fbw[0];
xm[i]=(fbo*xm[i]-wt*xto)/fbw[i];
ym[i]=(fbo*ym[i]-wt*yto)/fbw[i];
wt=w[in];
fbo=fbw[i];
fbw[i]+=wt;
xm[i]=(fbo*xm[i]+wt*xti)/fbw[i];
ym[i]=(fbo*ym[i]+wt*yti)/fbw[i];
tmp=fbw[i]*wt*(xti-xm[i])/fbo;
var[i]+=tmp*(xti-xm[i]);
cvar[i]+=tmp*(yti-ym[i]);
if (i==0) vary+=fbw[0]*wt*(yti-ym[0])*(yti-ym[0])/fbo;
}
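//Score each span's local linear fit with a leverage-corrected (leave-one-out style)
//chi^2 against the tweeter-window statistics, keep the best, bass-adjust the winning
//half-width toward the woofer via alpha, then linearly interpolate between the two
//bracketing spans' fits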
chi2m=1.e20; ibwb=ibw[2];
for (i=0;i<3;i++) {
a=0.0;
if (var[i]>vsmlsq) a=cvar[i]/var[i];
smo0[i]=a*(x[j]-xm[i])+ym[i];
chi2 = vary-2*a*cvar[0]+a*a*var[0];
if (i>0) {
tmp = ym[i]-ym[0]-a*(xm[i]-xm[0]);
chi2 += tmp*tmp*fbw[0];
}
tmp=1.0/fbw[i];
if (var[i]>vsmlsq) tmp+=(x[j]-xm[i])*(x[j]-xm[i])/var[i];
tmp = 1.0 - w[j]*tmp;
chi2 = fabs(chi2)/(tmp*tmp);
if (chi2<chi2m) {
chi2m=chi2;
ibwb=(ibw[1]+ibw[i])/2.;
}
}
tmp = sqrt(chi2m/chi2);
if (tmp<1.e-7) tmp=1.e-7;
ibwb+=(ibw[2]-ibwb)*pow(tmp,10.-alpha);
f = ibwb-ibw[1];
if (f<0) {
f/=ibw[0]-ibw[1];
smo[j]=(1.0-f)*smo0[1]+f*smo0[0];
} else {
f/=ibw[2]-ibw[1];
smo[j]=(1.0-f)*smo0[1]+f*smo0[2];
}
}
}
void supsmu_periodogram_innerloopcpu(int iteration, int n, DTYPE freqToTest, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram,
DTYPE * tt, DTYPE * weights, DTYPE * chi2, DTYPE * sc, DTYPE * smo, DTYPE * t1, int * argkeys, DTYPE * t1_sortby_argkeys,
DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int tid=omp_get_thread_num();
//Offsets into arrays for each thread
unsigned int offset_n=tid*n;
unsigned int offset_sc=tid*n*8;
DTYPE p=1.0/freqToTest;
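	//phase-fold each observation time into [0,1) at the trial period p;
	//e.g., tt=2.7 with p=1.2 gives fmod(2.7,1.2)/1.2 = 0.3/1.2 = 0.25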
for (int j=0; j<n; j++)
{
t1[offset_n+j]=fmod(tt[j],p)/p;
}
sortKeyValuePairsIntFloatDouble(argkeys+offset_n, t1+offset_n, n);
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1+offset_n, t1_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(data, data_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(weights, weights_sortby_argkeys+offset_n, argkeys+offset_n, n);
chi2[iteration]=supsmu_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, sc+offset_sc, alpha);
}
void supsmu_singlepass_periodogram_innerloopcpu(int iteration, int n, DTYPE freqToTest, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram,
DTYPE * tt, DTYPE * weights, DTYPE * chi2, DTYPE * smo, DTYPE * t1, int * argkeys, DTYPE * t1_sortby_argkeys,
DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys)
{
int tid=omp_get_thread_num();
//Offsets into arrays for each thread
unsigned int offset_n=tid*n;
DTYPE p=1.0/freqToTest;
for (int j=0; j<n; j++)
{
t1[offset_n+j]=fmod(tt[j],p)/p;
}
sortKeyValuePairsIntFloatDouble(argkeys+offset_n, t1+offset_n, n);
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1+offset_n, t1_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(data, data_sortby_argkeys+offset_n, argkeys+offset_n, n);
mapArr(weights, weights_sortby_argkeys+offset_n, argkeys+offset_n, n);
// printf("\nCPU t1: ");
// for (int j=0; j<n; j++)
// {
// printf("\n%f",t1[offset_n+j]);
// }
// printf("\nCPU argkeys: ");
// for (int j=0; j<n; j++)
// {
// printf("\n%d",argkeys[offset_n+j]);
// }
// printf("\nCPU t1_sortby_argkeys: ");
// for (int j=0; j<n; j++)
// {
// printf("\n%f",t1_sortby_argkeys[offset_n+j]);
// }
// printf("\n****************");
chi2[iteration]=supsmu_singlepass_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, alpha);
}
//single object processing
//MODEFLAG- 0 default supersmoother with multiple passes
//MODEFLAG- 1 Nat's singlepass supersmoother
void supersmoothercpusingleobject(bool MODEFLAG, DTYPE * time, DTYPE * data, DTYPE * error, const unsigned int sizeData,
const unsigned int numFreqs, const DTYPE minFreq, const DTYPE maxFreq, const DTYPE freqStep, DTYPE alpha,
DTYPE * pgram, DTYPE * foundPeriod, DTYPE * chi2, DTYPE * sc, DTYPE * smo, DTYPE * t1, int * argkeys,
DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys, DTYPE * weights, DTYPE * tt)
{
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
//Default supersmoother
if(MODEFLAG==0)
{
//Single object -- parallelize over frequencies
#pragma omp parallel for num_threads(NTHREADSCPU) schedule(static)
for (unsigned int i=0; i<numFreqs; i++)
{
DTYPE freqToTest=minFreq+(freqStep*i);
supsmu_periodogram_innerloopcpu(i, sizeData, freqToTest, time, data, error, alpha, pgram, tt, weights, chi2, sc, smo, t1, argkeys, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys);
}
}
//Nat's single pass supersmoother
else if(MODEFLAG==1)
{
printf("\nRunning single pass");
//Single object -- parallelize over frequencies
#pragma omp parallel for num_threads(NTHREADSCPU) schedule(static)
for (unsigned int i=0; i<numFreqs; i++)
{
DTYPE freqToTest=minFreq+(freqStep*i);
supsmu_singlepass_periodogram_innerloopcpu(i, sizeData, freqToTest, time, data, error, alpha, pgram, tt, weights, chi2, smo, t1, argkeys, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys);
}
}
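	//periodogram power: the fractional reduction in weighted chi-squared relative to
	//the constant (weighted-mean) model, scaled by N/2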
for (unsigned int i=0; i<numFreqs; i++)
{
pgram[i]=(0.5*(chi0-chi2[i])*sizeData)/chi0;
}
computePeriodSuperSmoother(pgram, numFreqs, minFreq, maxFreq, foundPeriod);
}
void computePeriodSuperSmoother(DTYPE * pgram, const unsigned int numFreqs, const DTYPE minFreq, const DTYPE maxFreq, DTYPE * foundPeriod)
{
DTYPE deltaf=(maxFreq-minFreq)/(numFreqs*1.0);
int maxPowerIdx=0;
DTYPE maxPower=pgram[0];
for (unsigned int i=0; i<numFreqs; i++)
{
if (pgram[i]>maxPower)
{
maxPower=pgram[i];
maxPowerIdx=i;
}
}
printf("\nFreq: %f, maxpowerIdx: %d",(minFreq+(maxPowerIdx*deltaf)), maxPowerIdx);
*foundPeriod=1.0/(minFreq+(maxPowerIdx*deltaf));
}
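//Illustrative example (values assumed): with minFreq=0.1, maxFreq=10.0, and
//numFreqs=9900, deltaf=0.001; a peak at index 4900 maps to frequency
//0.1+4900*0.001=5.0 and hence a period of 0.2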
//MODEFLAG-0 original supsmu (multi-pass)
//MODEFLAG-1 Nat's Single-pass supsmu
void supersmootherCPUBatch(bool MODEFLAG, unsigned int * objectId, DTYPE * time, DTYPE * data, DTYPE * error, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq,
const unsigned int numFreqs, DTYPE * sumPeriods, DTYPE ** pgram, DTYPE * foundPeriod, DTYPE alpha,
DTYPE * chi2, DTYPE * sc, DTYPE * smo, DTYPE * t1, int * argkeys,
DTYPE * t1_sortby_argkeys, DTYPE * data_sortby_argkeys, DTYPE * weights_sortby_argkeys, DTYPE * weights, DTYPE * tt)
{
//compute the object ranges in the arrays and store in struct
//This is given by the objectId
struct lookupObj * objectLookup=NULL;
unsigned int numUniqueObjects;
computeObjectRanges(objectId, &sizeData, &objectLookup, &numUniqueObjects);
#ifndef PYTHON
*pgram=(DTYPE *)malloc(sizeof(DTYPE)*(numFreqs)*numUniqueObjects);
#endif
foundPeriod=(DTYPE *)malloc(sizeof(DTYPE)*numUniqueObjects);
const DTYPE freqStep=(maxFreq-minFreq)/(numFreqs*1.0);
//number of objects skipped because they didn't have enough observations
unsigned int countSkippedObjectsThresh=0;
//for each object, call the parallel cpu algorithm
for (unsigned int i=0; i<numUniqueObjects; i++)
{
unsigned int idxMin=objectLookup[i].idxMin;
unsigned int idxMax=objectLookup[i].idxMax;
unsigned int sizeDataForObject=idxMax-idxMin+1;
uint64_t pgramOffset=(uint64_t)i*(uint64_t)numFreqs;
		//make sure the object has at least OBSTHRESH observations
if (sizeDataForObject>=OBSTHRESH)
{
supersmoothercpusingleobject(MODEFLAG, &time[idxMin], &data[idxMin], &error[idxMin], sizeDataForObject,
numFreqs, minFreq, maxFreq, freqStep, alpha, *pgram+pgramOffset, foundPeriod+i, chi2, sc,
smo, t1, argkeys, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys, weights, tt);
}
//too few data points to compute the periods
else
{
countSkippedObjectsThresh++;
foundPeriod[i]=0.0;
}
printf("\nObject: %d, Period: %f",objectLookup[i].objId, foundPeriod[i]);
}
printf("\nNumber of objects skipped because they didn't have %d observations: %u", OBSTHRESH, countSkippedObjectsThresh);
#if PRINTPERIODS==1
for (unsigned int i=0; i<numUniqueObjects; i++)
{
printf("\nObject: %d, Period: %f",objectLookup[i].objId, foundPeriod[i]);
}
#endif
//Validation
for (unsigned int i=0; i<numUniqueObjects; i++)
{
(*sumPeriods)+=foundPeriod[i];
}
}
//original port from Nat's code
//First step: making one big function except the kernel call
//will call on the CPU first for testing
void supsmu_periodogram_GPU_Batch_prototype(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod)
{
struct lookupObj * objectLookup=NULL;
unsigned int numUniqueObjects;
computeObjectRanges(objectId, &sizeData, &objectLookup, &numUniqueObjects);
pgram=(DTYPE *)malloc(sizeof(DTYPE)*(numFreq)*numUniqueObjects);
foundPeriod=(DTYPE *)malloc(sizeof(DTYPE)*numUniqueObjects);
//Allocate once
DTYPE * chi2=(DTYPE *)malloc(sizeof(DTYPE)*numFreq*numUniqueObjects);
DTYPE * y = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
//compute minimum time
DTYPE minTime=time[0];
for (unsigned int i=0; i<sizeData; i++)
{
if (time[i]<minTime)
{
minTime=time[i];
}
}
DTYPE w0=0.0;
DTYPE tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
tt[i]=time[i]-minTime;
weights[i]=1.0/(error[i]*error[i]);
w0+=weights[i];
tmp+=(data[i]*weights[i]);
}
w0=w0/(sizeData*1.0);
tmp=tmp/(sizeData*1.0);
DTYPE y0=tmp/w0;
std::copy(data, data+sizeData, y);
tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
y[i]=y[i]-y0;
tmp+=(y[i]*y[i])*weights[i];
}
DTYPE chi0=tmp/(sizeData*1.0);
//Arrays that need to be allocated for each thread
DTYPE * sc=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*7*NTHREADSCPU);
DTYPE * smo=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
DTYPE * t1=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
int * argkeys=(int *)malloc(sizeof(int)*sizeData*NTHREADSCPU);
DTYPE * t1_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
DTYPE * data_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
DTYPE * weights_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*NTHREADSCPU);
double tstart=omp_get_wtime();
for (int i=0; i<numFreq; i++)
{
DTYPE p=1.0/(minFreq+(deltaf*i));
for (unsigned int j=0; j<sizeData; j++)
{
t1[j]=fmod(tt[j],p)/p;
}
//Do argsort on t1
// sortKeyValuePairsIntDouble(argkeys, t1, sizeData);
sortKeyValuePairsIntFloatDouble(argkeys, t1, sizeData);
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1, t1_sortby_argkeys, argkeys, sizeData);
mapArr(data, data_sortby_argkeys, argkeys, sizeData);
mapArr(weights, weights_sortby_argkeys, argkeys, sizeData);
// chi2[i]=supsmu_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, sc+offset_sc, alpha);
//chi2
int iper=1;
DTYPE span=0.0;
supsmu(sizeData, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys, iper, span, alpha, smo, sc);
DTYPE tmptotal=0;
for (unsigned int k=0; k<sizeData; k++){
tmptotal+=((data_sortby_argkeys[k]-smo[k])*(data_sortby_argkeys[k]-smo[k]))*weights_sortby_argkeys[k];
}
chi2[i]=tmptotal/(sizeData*1.0);
}
double tend=omp_get_wtime();
printf("\nTime main loop: %f", tend - tstart);
for (int i=0; i<numFreq; i++)
{
pgram[i]=(0.5*(chi0-chi2[i])*sizeData)/chi0;
}
// double foundperiodtest=0;
int objIdx=0; // placeholder until we loop over objects
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, &foundPeriod[objIdx]);
printf("\nFound period: %f", foundPeriod[objIdx]);
}
//First step: making one big function except the kernel call
//will call on the CPU first for testing
void supsmu_periodogram_GPU_BatchOneThread(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod)
{
struct lookupObj * objectLookup=NULL;
unsigned int numUniqueObjects;
computeObjectRanges(objectId, &sizeData, &objectLookup, &numUniqueObjects);
pgram=(DTYPE *)malloc(sizeof(DTYPE)*(numFreq)*numUniqueObjects);
foundPeriod=(DTYPE *)malloc(sizeof(DTYPE)*numUniqueObjects);
//Allocate once
DTYPE * chi2=(DTYPE *)malloc(sizeof(DTYPE)*numFreq*numUniqueObjects);
DTYPE * y = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
//compute minimum time
DTYPE minTime=time[0];
for (unsigned int i=0; i<sizeData; i++)
{
if (time[i]<minTime)
{
minTime=time[i];
}
}
DTYPE w0=0.0;
DTYPE tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
tt[i]=time[i]-minTime;
weights[i]=1.0/(error[i]*error[i]);
w0+=weights[i];
tmp+=(data[i]*weights[i]);
}
w0=w0/(sizeData*1.0);
tmp=tmp/(sizeData*1.0);
DTYPE y0=tmp/w0;
std::copy(data, data+sizeData, y);
tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
y[i]=y[i]-y0;
tmp+=(y[i]*y[i])*weights[i];
}
DTYPE chi0=tmp/(sizeData*1.0);
//Arrays that need to be allocated for each thread
DTYPE * sc=(DTYPE *)malloc(sizeof(DTYPE)*sizeData*7);
DTYPE * smo=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * t1=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
int * argkeys=(int *)malloc(sizeof(int)*sizeData);
DTYPE * t1_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * data_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * weights_sortby_argkeys=(DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * dev_sc;
DTYPE * dev_smo;
DTYPE * dev_t1;
// int * dev_argkeys;
DTYPE * dev_t1_sortby_argkeys;
DTYPE * dev_data_sortby_argkeys;
DTYPE * dev_weights_sortby_argkeys;
//allocate memory on the GPU
gpuErrchk(cudaMalloc((void**)&dev_sc, sizeof(DTYPE)*(sizeData*7)));
gpuErrchk(cudaMalloc((void**)&dev_smo, sizeof(DTYPE)*(sizeData)));
gpuErrchk(cudaMalloc((void**)&dev_t1, sizeof(DTYPE)*(sizeData)));
// gpuErrchk(cudaMalloc((void**)&dev_argkeys, sizeof(int)*(sizeData)));
gpuErrchk(cudaMalloc((void**)&dev_t1_sortby_argkeys, sizeof(DTYPE)*(sizeData)));
gpuErrchk(cudaMalloc((void**)&dev_data_sortby_argkeys, sizeof(DTYPE)*(sizeData)));
gpuErrchk(cudaMalloc((void**)&dev_weights_sortby_argkeys, sizeof(DTYPE)*(sizeData)));
double tstart=omp_get_wtime();
for (int i=0; i<numFreq; i++)
{
DTYPE p=1.0/(minFreq+(deltaf*i));
for (unsigned int j=0; j<sizeData; j++)
{
t1[j]=fmod(tt[j],p)/p;
}
//Do argsort on t1
sortKeyValuePairsIntFloatDouble(argkeys, t1, sizeData);
//Map t1, data, and weights to the order given by argsorting t1
mapArr(t1, t1_sortby_argkeys, argkeys, sizeData);
mapArr(data, data_sortby_argkeys, argkeys, sizeData);
mapArr(weights, weights_sortby_argkeys, argkeys, sizeData);
// chi2[i]=supsmu_chi2(n, t1_sortby_argkeys+offset_n, data_sortby_argkeys+offset_n, weights_sortby_argkeys+offset_n, smo+offset_n, sc+offset_sc, alpha);
//chi2
int iper=1;
DTYPE span=0.0;
//copy data to the GPU
// gpuErrchk(cudaMemcpy( dev_timeX, timeX, sizeof(DTYPE)*(*sizeData), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( dev_sc, sc, sizeof(DTYPE)*sizeData*7, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( dev_t1, t1, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice));
// gpuErrchk(cudaMemcpy( dev_argkeys, argkeys, sizeof(int)*sizeData, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( dev_t1_sortby_argkeys, t1_sortby_argkeys, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( dev_data_sortby_argkeys, data_sortby_argkeys, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( dev_weights_sortby_argkeys,weights_sortby_argkeys, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice));
//For testing, compute this on the GPU
// supsmu(sizeData, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys, iper, span, alpha, smo, sc);
const int constSizeData=(int)sizeData;
	const DTYPE constalpha=alpha; //keep alpha fractional; an int copy would truncate it
const DTYPE constspan=span;
const int constiper=iper;
supsmukernelOneThread<<<1,1>>>(constSizeData, dev_t1_sortby_argkeys, dev_data_sortby_argkeys, dev_weights_sortby_argkeys, constiper, constspan, constalpha, dev_smo, dev_sc);
gpuErrchk(cudaMemcpy( smo, dev_smo, sizeof(DTYPE)*sizeData, cudaMemcpyDeviceToHost));
DTYPE tmptotal=0;
for (unsigned int k=0; k<sizeData; k++){
tmptotal+=((data_sortby_argkeys[k]-smo[k])*(data_sortby_argkeys[k]-smo[k]))*weights_sortby_argkeys[k];
}
chi2[i]=tmptotal/(sizeData*1.0);
}
double tend=omp_get_wtime();
printf("\nTime main loop: %f", tend - tstart);
for (int i=0; i<numFreq; i++)
{
pgram[i]=(0.5*(chi0-chi2[i])*sizeData)/chi0;
}
// double foundperiodtest=0;
int objIdx=0; // placeholder until we loop over objects
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, &foundPeriod[objIdx]);
printf("\nFound period: %f", foundPeriod[objIdx]);
}
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
void backToBackSort(int * dev_argkeys, int * dev_freqarr, DTYPE * dev_t1, int sizeData, int numFreq, cudaStream_t stream)
{
thrust::device_ptr<int> dev_argkeys_ptr(dev_argkeys);
thrust::device_ptr<DTYPE> dev_t1_ptr(dev_t1);
thrust::device_ptr<int> dev_freqarr_ptr(dev_freqarr);
try{
thrust::stable_sort_by_key(thrust::cuda::par.on(stream), dev_t1_ptr, dev_t1_ptr + (sizeData*numFreq),
thrust::make_zip_iterator(thrust::make_tuple(dev_argkeys_ptr, dev_freqarr_ptr)));
thrust::stable_sort_by_key(thrust::cuda::par.on(stream), dev_freqarr_ptr, dev_freqarr_ptr + (sizeData*numFreq),
thrust::make_zip_iterator(thrust::make_tuple(dev_argkeys_ptr, dev_t1_ptr)));
}
	catch(const thrust::system_error &e)
{
std::cerr << "Error inside sort: " << e.what() << std::endl;
exit(-1);
}
}
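//Worked example of the back-to-back sort with two frequencies (segments) of three
//points each (values illustrative):
//  t1      = [0.7 0.2 0.5 | 0.9 0.1 0.4]
//  freqarr = [ 0   0   0  |  1   1   1 ]
//  argkeys = [ 0   1   2  |  0   1   2 ]
//after stable_sort_by_key on t1:
//  t1      = [0.1 0.2 0.4 0.5 0.7 0.9], freqarr = [1 0 1 0 0 1], argkeys = [1 1 2 2 0 0]
//after stable_sort_by_key on freqarr (stability preserves the t1 order within a segment):
//  t1      = [0.2 0.5 0.7 | 0.1 0.4 0.9], argkeys = [1 2 0 | 1 2 0]
//i.e., every frequency segment ends up independently argsorted by t1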
//GPU supersmoother original with multiple passes
void supsmu_original_single_object(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod, double underestGPUcapacityGiB)
{
double tstartcpu=omp_get_wtime();
int iper=1;
DTYPE span=0.0;
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
const DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
double tendcpu=omp_get_wtime();
printf("\nTime CPU preamble: %f", tendcpu-tstartcpu);
////////////////////////
//for batching the frequencies
//first 0-refers to using original supsmu mode
//second 0-refers to using NUMGPUs when computing the number of batches
unsigned int numBatches=computeNumBatches(0, sizeData, numFreq, underestGPUcapacityGiB, 0);
//upper limit on the number of frequencies in a batch
int numFreqPerBatch=ceil(numFreq*1.0/numBatches*1.0);
printf("\nNumber of batches: %d, Number of frequencies per batch: %d", numBatches, numFreqPerBatch);fflush(stdout);
double tstartcreatestream=omp_get_wtime();
cudaStream_t batchstreams[NSTREAMSPERGPU*NUMGPU];
createStreams(batchstreams, NUMGPU, NSTREAMSPERGPU);
double tendcreatestream=omp_get_wtime();
// printf("\nTime to create streams: %f", tendcreatestream - tstartcreatestream);
//End for batching frequencies
////////////////////////
int * dev_freqarr[NUMGPU];
DTYPE * dev_smo[NUMGPU];
DTYPE * dev_t1[NUMGPU];
int * dev_argkeys[NUMGPU];
DTYPE * dev_t1_sortby_argkeys[NUMGPU];
DTYPE * dev_data_sortby_argkeys[NUMGPU];
DTYPE * dev_weights_sortby_argkeys[NUMGPU];
DTYPE * dev_tt[NUMGPU];
DTYPE * dev_data[NUMGPU];
DTYPE * dev_weights[NUMGPU];
DTYPE * dev_sc[NUMGPU];
DTYPE * dev_pgram[NUMGPU];
#pragma omp parallel for num_threads(NUMGPU)
for (int i=0; i<NUMGPU; i++)
{
		//streams are created per device in blocks of NSTREAMSPERGPU, so take the first
		//stream owned by GPU i; indexing by the raw OpenMP thread id could select a
		//stream that was created on a different device
		int streamnum=i*NSTREAMSPERGPU;
cudaSetDevice(i);
//Those that depend on the number of frequencies (not the number per batch)
gpuErrchk(cudaMalloc((void**)&dev_pgram[i], sizeof(DTYPE)*numFreq));
//Arrays broken up into batches based on frequency
gpuErrchk(cudaMalloc((void**)&dev_freqarr[i], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_argkeys[i], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_sc[i], sizeof(DTYPE)*(sizeData*8*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_smo[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_t1[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_t1_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_data_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_weights_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
//allocate on the GPU
gpuErrchk(cudaMalloc((void**)&dev_tt[i], sizeof(DTYPE)*sizeData));
gpuErrchk(cudaMalloc((void**)&dev_data[i], sizeof(DTYPE)*sizeData));
gpuErrchk(cudaMalloc((void**)&dev_weights[i], sizeof(DTYPE)*sizeData));
//copy to the GPU
gpuErrchk(cudaMemcpyAsync( dev_tt[i], tt, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[streamnum]));
gpuErrchk(cudaMemcpyAsync( dev_data[i], data, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[streamnum]));
gpuErrchk(cudaMemcpyAsync( dev_weights[i], weights, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[streamnum]));
}
//Loop over batches
#pragma omp parallel for num_threads(NUMGPU*NSTREAMSPERGPU)
for (unsigned int i=0; i<numBatches; i++)
{
int globaltid=omp_get_thread_num();
//thread id for a single GPU
int tid=globaltid%NSTREAMSPERGPU;
int gpuid=globaltid/NSTREAMSPERGPU;
uint64_t batchWriteOffset=(uint64_t)i*(uint64_t)numFreqPerBatch;
uint64_t offsetFreqId=(uint64_t)i*(uint64_t)numFreqPerBatch;
int numFreqInBatch=numFreqPerBatch;
int streamOffset=sizeData*numFreqPerBatch*tid;
int streamnum=(gpuid*NSTREAMSPERGPU)+tid;
cudaSetDevice(gpuid);
//last batch has fewer frequencies
if((numBatches!=1)&&(i==(numBatches-1)))
{
numFreqInBatch=min(numFreqInBatch,((int)numFreq)-((i)*numFreqPerBatch));
}
printf("\nglobal tid: %d, tid: %d, gpuid: %d, Stream num: %d, Batch Number: %d, number of frequencies: %d",globaltid, tid, gpuid, streamnum, i, numFreqInBatch);
unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
computePeriodModFOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, minFreq, offsetFreqId, deltaf, &dev_t1[gpuid][streamOffset], dev_tt[gpuid]);
//Initialize the key arrays
initializeKeyArraysOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset], &dev_freqarr[gpuid][streamOffset]);
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
backToBackSort(&dev_argkeys[gpuid][streamOffset], &dev_freqarr[gpuid][streamOffset], &dev_t1[gpuid][streamOffset], sizeData, numFreqInBatch, batchstreams[streamnum]);
//Map the keys based on argkeys
//Separate map and transform
mapUsingArgKeysOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset], &dev_data[gpuid][0], &dev_weights[gpuid][0], &dev_t1[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
///////////////////////////////
// Main kernels
///////////////////////////////
//global memory only
#if ORIGINALMODE==0
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
supsmukernel<<<numBlocks,SMALLBLOCKSIZE,0,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, iper, span, alpha, &dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset],
&dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
//One block per frequency with SM -- use small block size (e.g., 32 threads/block)
#if ORIGINALMODE==1
//Shared memory for x,y,z arrays and other information
const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
supsmukernelSMOneFreqBlock<<<numFreqInBatch,SMALLBLOCKSIZE,SMSIZE,batchstreams[streamnum]>>>(sizeData, iper, span, alpha, &dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset],
&dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
#if ORIGINALMODE==2
//uses one thread per frequency with shared memory for x,y,z arrays
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
supsmukernelSMOneThreadPerFreq<<<numBlocks,SMALLBLOCKSIZE,SMSIZEDATA,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, iper, span, alpha,
&dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
//Cascade the execution so that it is robust to running out of shared memory
//Try executing SM kernel 1 thread per freq
//then global memory kernel (which is guaranteed to execute)
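	//e.g. (illustrative, assuming DTYPE=double and SMALLBLOCKSIZE=32): a time series with
	//sizeData=4000 needs 8*3*4000*32 bytes ~ 2.9 MiB of shared memory per block, far above
	//the typical 48 KiB default, so the SM kernel launch fails and the global memory
	//kernel below runs instead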
#if ORIGINALMODE==-1
printf("\n[CASCADE] Cascade mode, launching SM one thread per frequency");
//First, attempt 1 thread per frequency with SM
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
supsmukernelSMOneThreadPerFreq<<<numBlocks,SMALLBLOCKSIZE,SMSIZEDATA,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, iper, span, alpha,
&dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
// if (err != cudaSuccess)
// {
// printf("\n[CASCADE] Launching SM- 1 block per frequency");
// const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
// supsmukernelSMOneFreqBlock<<<numFreqInBatch,SMALLBLOCKSIZE,SMSIZE,batchstreams[streamnum]>>>(sizeData, iper, span, alpha, &dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset],
// &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
//execute global memory kernel
// cudaError_t err2 = cudaPeekAtLastError();
cudaError_t err2 = cudaGetLastError();
if (err2 != cudaSuccess)
{
// std::cout << "\nCUDA error: " << cudaGetErrorString(err2);
// printf("\n Launching global memory kernel");
printf("\n[CASCADE] Launching global memory kernel");
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
supsmukernel<<<numBlocks,SMALLBLOCKSIZE,0,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, iper, span, alpha, &dev_smo[gpuid][streamOffset], &dev_sc[gpuid][streamOffset],
&dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
}
// }
#endif
	///////////////////////////////
	// Pgram reduction
	///////////////////////////////
//Some number of threads per frequency
unsigned int numThreadPerFreq2=8; //must divide evenly into the block size
unsigned int NUMBLOCKS10=ceil((numFreqInBatch*numThreadPerFreq2*1.0)/(LARGEBLOCKSIZE*1.0));
const unsigned int SMSIZE2=sizeof(DTYPE)*(LARGEBLOCKSIZE/numThreadPerFreq2);
computePgramReduction<<<NUMBLOCKS10, LARGEBLOCKSIZE, SMSIZE2, batchstreams[streamnum]>>>(batchWriteOffset, numThreadPerFreq2, chi0, sizeData, numFreqInBatch, &dev_smo[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset], &dev_pgram[gpuid][0]);
//Copy pgram back to host
gpuErrchk(cudaMemcpyAsync(pgram+batchWriteOffset, &dev_pgram[gpuid][batchWriteOffset], sizeof(DTYPE)*numFreqInBatch, cudaMemcpyDeviceToHost, batchstreams[streamnum]));
// fprintf(stderr,"\nBatch: %d, write pgram range: %d, %d ",i, batchWriteOffset,batchWriteOffset+numFreqInBatch);
} //end loop over batches
// cudaError_t err = cudaGetLastError(); // add
// if (err != cudaSuccess) std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl; // add
///////////////////////////////
// End main kernels
///////////////////////////////
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, foundPeriod);
printf("\nFound period: %f", *foundPeriod);
double tstartfree=omp_get_wtime();
//free device data
for (int i=0; i<NUMGPU; i++)
{
cudaFree(dev_sc[i]);
cudaFree(dev_pgram[i]);
cudaFree(dev_freqarr[i]);
cudaFree(dev_argkeys[i]);
cudaFree(dev_smo[i]);
cudaFree(dev_t1[i]);
cudaFree(dev_t1_sortby_argkeys[i]);
cudaFree(dev_data_sortby_argkeys[i]);
cudaFree(dev_weights_sortby_argkeys[i]);
cudaFree(dev_tt[i]);
cudaFree(dev_data[i]);
cudaFree(dev_weights[i]);
}
//free host data
free(weights);
free(tt);
double tendfree=omp_get_wtime();
printf("\nTime to free: %f", tendfree - tstartfree);
}
//Only use a single GPU
void supsmu_original_single_gpu(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod, double underestGPUcapacityGiB, int gpuid)
{
printf("\nObject Id: %u", *objectId);fflush(stdout);
double tstartcpu=omp_get_wtime();
int iper=1;
DTYPE span=0.0;
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
const DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
////////////////////////
//for batching the frequencies
//0-refers to using original supsmu mode
//1- a flag referring to using a single GPU
unsigned int numBatches=computeNumBatches(0, sizeData, numFreq, underestGPUcapacityGiB, 1);
//upper limit on the number of frequencies in a batch
unsigned int numFreqPerBatch=ceil(numFreq*1.0/numBatches*1.0);
printf("\nObject Id: %u, Number of batches: %u, Number of frequencies per batch: %u", *objectId, numBatches, numFreqPerBatch);fflush(stdout);
double tstartcreatestream=omp_get_wtime();
cudaStream_t batchstreams[NSTREAMSPERGPU*1];
createStreamsOneGPU(batchstreams, NSTREAMSPERGPU, gpuid);
double tendcreatestream=omp_get_wtime();
// printf("\nTime to create streams: %f", tendcreatestream - tstartcreatestream);
//End for batching frequencies
////////////////////////
int * dev_freqarr[1];
DTYPE * dev_smo[1];
DTYPE * dev_t1[1];
int * dev_argkeys[1];
DTYPE * dev_t1_sortby_argkeys[1];
DTYPE * dev_data_sortby_argkeys[1];
DTYPE * dev_weights_sortby_argkeys[1];
DTYPE * dev_tt[1];
DTYPE * dev_data[1];
DTYPE * dev_weights[1];
DTYPE * dev_sc[1];
DTYPE * dev_pgram[1];
cudaSetDevice(gpuid);
//Those that depend on the number of frequencies (not the number per batch)
gpuErrchk(cudaMalloc((void**)&dev_pgram[0], sizeof(DTYPE)*numFreq));
//Arrays broken up into batches based on frequency
gpuErrchk(cudaMalloc((void**)&dev_freqarr[0], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_argkeys[0], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_sc[0], sizeof(DTYPE)*(sizeData*8*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_smo[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_t1[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_t1_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_data_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_weights_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
//allocate on the GPU
gpuErrchk(cudaMalloc((void**)&dev_tt[0], sizeof(DTYPE)*sizeData));
gpuErrchk(cudaMalloc((void**)&dev_data[0], sizeof(DTYPE)*sizeData));
gpuErrchk(cudaMalloc((void**)&dev_weights[0], sizeof(DTYPE)*sizeData));
//copy to the GPU
gpuErrchk(cudaMemcpyAsync( dev_tt[0], tt, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[0]));
gpuErrchk(cudaMemcpyAsync( dev_data[0], data, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[0]));
gpuErrchk(cudaMemcpyAsync( dev_weights[0], weights, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[0]));
//Loop over batches
#pragma omp parallel for num_threads(NSTREAMSPERGPU)
for (unsigned int i=0; i<numBatches; i++)
{
cudaSetDevice(gpuid);
int globaltid=omp_get_thread_num();
//thread id for a single GPU
int tid=globaltid%NSTREAMSPERGPU;
uint64_t batchWriteOffset=(uint64_t)i*(uint64_t)numFreqPerBatch;
uint64_t offsetFreqId=(uint64_t)i*(uint64_t)numFreqPerBatch;
unsigned int numFreqInBatch=numFreqPerBatch;
unsigned int streamOffset=sizeData*numFreqPerBatch*tid;
int streamnum=tid;
//last batch has fewer frequencies
if((numBatches!=1)&&(i==(numBatches-1)))
{
numFreqInBatch=min(numFreqInBatch,((int)numFreq)-((i)*numFreqPerBatch));
}
printf("\nglobal tid: %d, tid: %d, gpuid: %d, Stream num: %u, Batch Number: %u, number of frequencies: %u",globaltid, tid, gpuid, streamnum, i, numFreqInBatch);
unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
computePeriodModFOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, minFreq, offsetFreqId, deltaf, &dev_t1[0][streamOffset], dev_tt[0]);
//Initialize the key arrays
initializeKeyArraysOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset], &dev_freqarr[0][streamOffset]);
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
backToBackSort(&dev_argkeys[0][streamOffset], &dev_freqarr[0][streamOffset], &dev_t1[0][streamOffset], sizeData, numFreqInBatch, batchstreams[streamnum]);
//Map the keys based on argkeys
//Separate map and transform
mapUsingArgKeysOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset], &dev_data[0][0], &dev_weights[0][0], &dev_t1[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
///////////////////////////////
// Main kernels
///////////////////////////////
//global memory only
#if ORIGINALMODE==0
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
supsmukernel<<<numBlocks,SMALLBLOCKSIZE,0,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, iper, span, alpha, &dev_smo[0][streamOffset], &dev_sc[0][streamOffset],
&dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
//One block per frequency with SM -- use small block size (e.g., 32 threads/block)
#if ORIGINALMODE==1
//Shared memory for x,y,z arrays and other information
const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
supsmukernelSMOneFreqBlock<<<numFreqInBatch,SMALLBLOCKSIZE,SMSIZE,batchstreams[streamnum]>>>(sizeData, iper, span, alpha, &dev_smo[0][streamOffset], &dev_sc[0][streamOffset],
&dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
#if ORIGINALMODE==2
//uses one thread per frequency with shared memory for x,y,z arrays
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
supsmukernelSMOneThreadPerFreq<<<numBlocks,SMALLBLOCKSIZE,SMSIZEDATA,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, iper, span, alpha,
&dev_smo[0][streamOffset], &dev_sc[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
//Cascade the execution so that it is robust to running out of shared memory
//Try executing SM kernel 1 thread per freq,
//then global memory kernel (which is guaranteed to execute)
#if ORIGINALMODE==-1
printf("\nCascade mode");
//First, attempt 1 thread per frequency with SM
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
supsmukernelSMOneThreadPerFreq<<<numBlocks,SMALLBLOCKSIZE,SMSIZEDATA,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, iper, span, alpha,
&dev_smo[0][streamOffset], &dev_sc[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
// cudaError_t err = cudaGetLastError();
// if (err != cudaSuccess)
// {
// const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
// supsmukernelSMOneFreqBlock<<<numFreqInBatch,SMALLBLOCKSIZE,SMSIZE,batchstreams[streamnum]>>>(sizeData, iper, span, alpha, &dev_smo[0][streamOffset], &dev_sc[0][streamOffset],
// &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
//execute global memory kernel
// cudaError_t err2 = cudaPeekAtLastError();
cudaError_t err2 = cudaGetLastError();
if (err2 != cudaSuccess)
{
// std::cout << "\nCUDA error: " << cudaGetErrorString(err2);
printf("\n Launching global memory kernel");
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
supsmukernel<<<numBlocks,SMALLBLOCKSIZE,0,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, iper, span, alpha, &dev_smo[0][streamOffset], &dev_sc[0][streamOffset],
&dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
}
// }
#endif
	///////////////////////////////
	// Pgram reduction
	///////////////////////////////
//Some number of threads per frequency
unsigned int numThreadPerFreq2=8; //must divide evenly into the block size
unsigned int NUMBLOCKS10=ceil((numFreqInBatch*numThreadPerFreq2*1.0)/(LARGEBLOCKSIZE*1.0));
const unsigned int SMSIZE2=sizeof(DTYPE)*(LARGEBLOCKSIZE/numThreadPerFreq2);
computePgramReduction<<<NUMBLOCKS10, LARGEBLOCKSIZE, SMSIZE2, batchstreams[streamnum]>>>(batchWriteOffset, numThreadPerFreq2, chi0, sizeData, numFreqInBatch, &dev_smo[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset], &dev_pgram[0][0]);
//Copy pgram back to host
gpuErrchk(cudaMemcpyAsync(pgram+batchWriteOffset, &dev_pgram[0][batchWriteOffset], sizeof(DTYPE)*numFreqInBatch, cudaMemcpyDeviceToHost, batchstreams[streamnum]));
// fprintf(stderr,"\nBatch: %d, write pgram range: %d, %d ",i, batchWriteOffset,batchWriteOffset+numFreqInBatch);
} //end loop over batches
// cudaError_t err = cudaGetLastError(); // add
// if (err != cudaSuccess) std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl; // add
///////////////////////////////
// End main kernels
///////////////////////////////
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, foundPeriod);
printf("\nFound period: %f", *foundPeriod);
// //free device data
// for (int i=0; i<NUMGPU; i++)
// {
double tstartfree=omp_get_wtime();
cudaFree(dev_sc[0]);
cudaFree(dev_pgram[0]);
cudaFree(dev_freqarr[0]);
cudaFree(dev_argkeys[0]);
cudaFree(dev_smo[0]);
cudaFree(dev_t1[0]);
cudaFree(dev_t1_sortby_argkeys[0]);
cudaFree(dev_data_sortby_argkeys[0]);
cudaFree(dev_weights_sortby_argkeys[0]);
cudaFree(dev_tt[0]);
cudaFree(dev_data[0]);
cudaFree(dev_weights[0]);
// }
destroyStreamsOneGPU(batchstreams, NSTREAMSPERGPU, gpuid);
//free host data
free(weights);
free(tt);
double tendfree=omp_get_wtime();
printf("\nTime to free: %f", tendfree - tstartfree);
}
void compute_chi0_tt_weights(unsigned int sizeData, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE * chi0, DTYPE * tt, DTYPE * weights)
{
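	//weights are inverse-variance, w_i = 1/sigma_i^2; y0 = sum(w_i*d_i)/sum(w_i) is the
	//weighted mean of the data, and chi0 = (1/n)*sum(w_i*(d_i-y0)^2) is the weighted
	//chi-squared of the constant model used to normalize the periodogram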
// DTYPE * y = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE y=0;
//compute minimum time
DTYPE minTime=time[0];
for (unsigned int i=0; i<sizeData; i++)
{
if (time[i]<minTime)
{
minTime=time[i];
}
}
DTYPE w0=0.0;
DTYPE tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
tt[i]=time[i]-minTime;
weights[i]=1.0/(error[i]*error[i]);
w0+=weights[i];
tmp+=(data[i]*weights[i]);
}
w0=w0/(sizeData*1.0);
tmp=tmp/(sizeData*1.0);
DTYPE y0=tmp/w0;
tmp=0;
for (unsigned int i=0; i<sizeData; i++)
{
y=data[i]-y0;
tmp+=(y*y)*weights[i];
}
*chi0=tmp/(sizeData*1.0);
}
double getGPUCapacity()
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
	//Read the global memory capacity from the device.
	//cudaMemGetInfo requires valid pointers for both the free and total arguments
	size_t freebytes=0;
	size_t globalmembytes=0;
	gpuErrchk(cudaMemGetInfo(&freebytes,&globalmembytes));
double totalcapacityGiB=globalmembytes*1.0/(1024*1024*1024.0);
printf("\n[Device name: %s, Detecting GPU Global Memory Capacity] Size in GiB: %f", prop.name, totalcapacityGiB);
double underestcapacityGiB=totalcapacityGiB*BETA;
printf("\n[Underestimating GPU Global Memory Capacity (BETA: %f)] Size in GiB: %f", BETA, underestcapacityGiB);
return underestcapacityGiB;
}
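//e.g. (illustrative): if the device reports 16 GiB and BETA=0.90, the batch planner
//budgets ~14.4 GiB so that transient allocations do not trigger out-of-memory errors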
double computedeltaf(lookupObj * objectLookup, DTYPE * time, unsigned int numUniqueObjects)
{
//Find the maximum time span for all objects
double maxTimeSpan=0;
#pragma omp parallel for reduction(max: maxTimeSpan)
for (unsigned int i=0; i<numUniqueObjects; i++)
{
unsigned int idxMin=objectLookup[i].idxMin;
unsigned int idxMax=objectLookup[i].idxMax;
double timeSpan=time[idxMax]-time[idxMin];
if (maxTimeSpan<timeSpan)
{
maxTimeSpan=timeSpan;
}
}
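	//oversample the natural frequency resolution 1/T by a factor of 10;
	//e.g., a ~3650 day baseline gives df = 0.1/3650 ~ 2.7e-5 cycles/day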
double df=0.1/maxTimeSpan;
return df;
}
//mode-0 original supsmu
//mode-1 single pass supsmu
void supsmu_gpu_batch(const bool mode, unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, unsigned int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE ** pgram, DTYPE * sumPeriods)
{
double tstartpreamble=omp_get_wtime();
//get the global memory capacity of the GPU and then underestimate it so that we don't have out of memory errors
double underestGPUcapacityGiB=getGPUCapacity();
//For doing batch processing
struct lookupObj * objectLookup=NULL;
unsigned int numUniqueObjects;
computeObjectRanges(objectId, &sizeData, &objectLookup, &numUniqueObjects);
//utility function: compute deltaf
// double deltaf=computedeltaf(objectLookup, time, numUniqueObjects);
// printf("\nDelta f: %f", deltaf);
//allocate memory for the pgram
#ifndef PYTHON
*pgram=(DTYPE *)malloc(sizeof(DTYPE)*(uint64_t)numFreq*(uint64_t)numUniqueObjects);
#endif
// printf("\nPgram GiB: %f", (sizeof(DTYPE)*numFreq*numUniqueObjects*1.0)/(1024*1024*1024.0));
// *pgram=(DTYPE *)calloc((unsigned int)numFreq*numUniqueObjects,sizeof(DTYPE));
DTYPE * periods=(DTYPE *)malloc(sizeof(DTYPE)*numUniqueObjects);
// DTYPE * periods=(DTYPE *)calloc(numUniqueObjects,sizeof(DTYPE));
//number of objects skipped because they didn't have enough observations
unsigned int countSkippedObjectsThresh=0;
//Computing SS is parallelized as follows:
//1) If you are computing a single object, parallelize the object across multiple GPUs
//2) If you are computing a batch of objects, execute a single object per GPU (assuming you are using multiple GPUs)
double tendpreamble=omp_get_wtime();
printf("\nPreabmble before calling main function: %f", tendpreamble - tstartpreamble);
//1) single object-- parallelize single object on multiple GPUs
if (numUniqueObjects==1)
{
unsigned int idxMin=objectLookup[0].idxMin;
unsigned int idxMax=objectLookup[0].idxMax;
unsigned int sizeDataForObject=idxMax-idxMin+1;
uint64_t pgramOffset=0;
DTYPE foundPeriod;
if (sizeDataForObject>=OBSTHRESH)
{
//original
if (mode==0)
{
supsmu_original_single_object(objectId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB);
}
//single pass
if (mode==1)
{
supsmu_singlepass_single_object(objectId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB);
}
periods[0]=foundPeriod;
}
else
{
periods[0]=0.0;
countSkippedObjectsThresh++;
}
}
//2) multiple objects -- parallelize one object per GPU
//dynamic scheduling since time series are different lengths
else
{
#pragma omp parallel for schedule(dynamic) num_threads(NUMGPU) reduction(+:countSkippedObjectsThresh)
for (unsigned int i=0; i<numUniqueObjects; i++)
{
unsigned int idxMin=objectLookup[i].idxMin;
unsigned int idxMax=objectLookup[i].idxMax;
unsigned int sizeDataForObject=idxMax-idxMin+1;
uint64_t pgramOffset=(uint64_t)i*(uint64_t)numFreq;
DTYPE foundPeriod;
int tid=omp_get_thread_num();
//only process objects with at least OBSTHRESH data points
if(sizeDataForObject>=OBSTHRESH)
{
//original supsmu
if (mode==0)
{
				//could parallelize the batch of objects by parallelizing each object individually
// supsmu_original_single_object(objectId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB);
supsmu_original_single_gpu(&objectLookup[i].objId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB, tid);
}
//single pass supsmu
if (mode==1)
{
supsmu_singlepass_single_gpu(&objectLookup[i].objId, sizeDataForObject, minFreq, maxFreq, numFreq, &time[idxMin], &data[idxMin], &error[idxMin], alpha, *pgram+pgramOffset, &foundPeriod, underestGPUcapacityGiB, tid);
}
periods[i]=foundPeriod;
}
//too few data points to compute the periods
else
{
countSkippedObjectsThresh++;
periods[i]=0.0;
}
} //end parallel for loop
} //end if statement around unique objects
printf("\nNumber of objects skipped because they didn't have %d observations: %u", OBSTHRESH, countSkippedObjectsThresh);
for (unsigned int i=0; i<numUniqueObjects; i++)
{
*sumPeriods+=periods[i];
}
///////////////////////
//Output
//print found periods to stdout
#if PRINTPERIODS==1
outputPeriodsToStdout(objectLookup, numUniqueObjects, periods);
#endif
//print found periods to file
#if PRINTPERIODS==2
outputPeriodsToFile(objectLookup, numUniqueObjects, periods);
#endif
//Output pgram to file
#if PRINTPGRAM==1
outputPgramToFile(objectLookup, numUniqueObjects, numFreq, pgram);
#endif
//End output
///////////////////////
free(periods);
free(objectLookup);
}
void createStreams(cudaStream_t * streams, unsigned int num_gpus, unsigned int streams_per_gpu)
{
// #pragma omp parallel for num_threads(num_gpus)
for (unsigned int i=0; i<num_gpus; i++)
{
//set device
cudaSetDevice(i);
//create stream for the device
for (unsigned int j=0; j<streams_per_gpu; j++)
{
cudaStreamCreate(&streams[(i*streams_per_gpu)+j]);
}
}
}
void destroyStreamsOneGPU(cudaStream_t * streams, unsigned int streams_per_gpu, int gpuid)
{
//set device
cudaSetDevice(gpuid);
//create stream for the device
for (unsigned int i=0; i<streams_per_gpu; i++)
{
cudaStreamDestroy(streams[i]);
}
}
void createStreamsOneGPU(cudaStream_t * streams, unsigned int streams_per_gpu, int gpuid)
{
//set device
cudaSetDevice(gpuid);
//create stream for the device
for (unsigned int i=0; i<streams_per_gpu; i++)
{
cudaStreamCreate(&streams[i]);
}
}
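//Minimal usage sketch (illustrative): pair each create with a destroy on the same device
//  cudaStream_t s[NSTREAMSPERGPU];
//  createStreamsOneGPU(s, NSTREAMSPERGPU, 0);
//  ...enqueue kernels/copies on s[0..NSTREAMSPERGPU-1]...
//  destroyStreamsOneGPU(s, NSTREAMSPERGPU, 0);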
//GPU supersmoother with single pass
//Processes a single object potentially with multiple GPUs
void supsmu_singlepass_single_gpu(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod, double underestGPUcapacityGiB, int gpuid)
{
double tstartcpu=omp_get_wtime();
// int iper=1;
// DTYPE span=0.0;
//Allocate host memory
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
const DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
double tendcpu=omp_get_wtime();
printf("\nCPU preamble time: %f", tendcpu - tstartcpu);
	double tstartGPUPreamble=omp_get_wtime();
////////////////////////
//for batching the frequencies (not objects)
//1- mode single pass
//1- compute assuming a single GPU
unsigned int numBatches=computeNumBatches(1, sizeData, numFreq, underestGPUcapacityGiB, 1);
//upper limit on the number of frequencies in a batch
int numFreqPerBatch=ceil(numFreq*1.0/numBatches*1.0);
printf("\nNumber of batches: %d, Number of frequencies per batch: %d", numBatches, numFreqPerBatch);
double tstartcreatestream=omp_get_wtime();
cudaStream_t batchstreams[NSTREAMSPERGPU*1];
createStreamsOneGPU(batchstreams, NSTREAMSPERGPU, gpuid);
double tendcreatestream=omp_get_wtime();
// printf("\nTime to create streams: %f", tendcreatestream - tstartcreatestream);
//End for batching frequencies
////////////////////////
//Device variables
int * dev_freqarr[1];
DTYPE * dev_smo[1];
DTYPE * dev_t1[1];
int * dev_argkeys[1];
DTYPE * dev_t1_sortby_argkeys[1];
DTYPE * dev_data_sortby_argkeys[1];
DTYPE * dev_weights_sortby_argkeys[1];
DTYPE * dev_tt[1];
DTYPE * dev_data[1];
DTYPE * dev_weights[1];
DTYPE * dev_pgram[1];
cudaSetDevice(gpuid);
//Those that depend on the number of frequencies (not the number per batch)
gpuErrchk(cudaMalloc((void**)&dev_pgram[0], sizeof(DTYPE)*numFreq));
//Arrays broken up into batches based on frequency
gpuErrchk(cudaMalloc((void**)&dev_freqarr[0], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_argkeys[0], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_smo[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_t1[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_t1_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_data_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_weights_sortby_argkeys[0], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
//allocate on the GPU
gpuErrchk(cudaMalloc((void**)&dev_tt[0], sizeof(DTYPE)*sizeData));
gpuErrchk(cudaMalloc((void**)&dev_data[0], sizeof(DTYPE)*sizeData));
gpuErrchk(cudaMalloc((void**)&dev_weights[0], sizeof(DTYPE)*sizeData));
//copy to the GPU
gpuErrchk(cudaMemcpyAsync( dev_tt[0], tt, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[0]));
gpuErrchk(cudaMemcpyAsync( dev_data[0], data, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[0]));
gpuErrchk(cudaMemcpyAsync( dev_weights[0], weights, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[0]));
// double tend_GPUinit=omp_get_wtime();
// printf("\nTime initializing al GPUs: %f", tend_GPUinit-tstart_GPUinit);
	double tendGPUPreamble=omp_get_wtime();
	printf("\nTime GPU preamble: %f", tendGPUPreamble - tstartGPUPreamble);
double tstartmainloop=omp_get_wtime();
//Loop over Batches
#pragma omp parallel for num_threads(NSTREAMSPERGPU)
for (unsigned int i=0; i<numBatches; i++)
{
cudaSetDevice(gpuid);
int globaltid=omp_get_thread_num();
//thread id for a single GPU
int tid=globaltid%NSTREAMSPERGPU;
uint64_t batchWriteOffset=(uint64_t)i*(uint64_t)numFreqPerBatch;
uint64_t offsetFreqId=(uint64_t)i*(uint64_t)numFreqPerBatch;
int numFreqInBatch=numFreqPerBatch;
int streamOffset=sizeData*numFreqPerBatch*tid;
int streamnum=tid;
//last batch has fewer frequencies
if((numBatches!=1)&&(i==(numBatches-1)))
{
numFreqInBatch=min(numFreqInBatch,((int)numFreq)-((i)*numFreqPerBatch));
}
printf("\nglobal tid: %d, tid: %d, gpuid: %d, Stream num: %d, Batch Number: %d, number of frequencies: %d",globaltid, tid, gpuid, streamnum, i, numFreqInBatch);
unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
computePeriodModFOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, minFreq, offsetFreqId, deltaf, &dev_t1[0][streamOffset], dev_tt[0]);
//Initialize the key arrays
initializeKeyArraysOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset], &dev_freqarr[0][streamOffset]);
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
backToBackSort(&dev_argkeys[0][streamOffset], &dev_freqarr[0][streamOffset], &dev_t1[0][streamOffset], sizeData, numFreqInBatch, batchstreams[streamnum]);
//Map the keys based on argkeys
//Separate map and transform
#if COALESCED==0 || SINGLEPASSMODE==1 || SINGLEPASSMODE==2
mapUsingArgKeysOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset], &dev_data[0][0], &dev_weights[0][0], &dev_t1[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
//combine map and transform for coalesced memory accesses for global memory kernel
#if SINGLEPASSMODE==0 && COALESCED==1
mapUsingArgKeysOneThreadPerUpdateAndReorderCoalesced<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[0][streamOffset],
&dev_data[0][0], &dev_weights[0][0],
&dev_t1[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
///////////////////////////////
// Main kernels
///////////////////////////////
//global memory only
#if SINGLEPASSMODE==0
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
#if COALESCED==0
supsmukernelSinglePassGlobalMemory<<<numBlocks,SMALLBLOCKSIZE, 0,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, alpha, &dev_smo[0][streamOffset],
&dev_tt[0][0], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
#if COALESCED==1
supsmukernelSinglePassGlobalMemoryCoalesced<<<numBlocks,SMALLBLOCKSIZE, 0,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, alpha, &dev_smo[0][streamOffset],
&dev_tt[0][0], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
#endif
//One block per frequency with SM -- use small block size (e.g., 32 threads/block)
#if SINGLEPASSMODE==1
//Shared memory for x,y,z arrays
const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
supsmukernelSinglePassSMOneBlockPerFreq<<<numFreqInBatch,SMALLBLOCKSIZE,SMSIZE,batchstreams[streamnum]>>>(sizeData, alpha, &dev_smo[0][streamOffset],
&dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset]);
#endif
#if SINGLEPASSMODE==2
//uses one thread per frequency with shared memory for x,y,z arrays
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
supsmukernelSinglePassSMOneThreadPerFreq<<<numBlocks,SMALLBLOCKSIZE,SMSIZEDATA,batchstreams[streamnum]>>>(numFreqInBatch, sizeData,
alpha, &dev_smo[0][streamOffset], &dev_t1_sortby_argkeys[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset],
&dev_weights_sortby_argkeys[0][streamOffset]);
#endif
//Some number of threads per frequency
unsigned int numThreadPerFreq2=8; //must divide evenly into the block size
unsigned int NUMBLOCKS10=ceil((numFreqInBatch*numThreadPerFreq2*1.0)/(LARGEBLOCKSIZE*1.0));
const unsigned int SMSIZE2=sizeof(DTYPE)*(LARGEBLOCKSIZE/numThreadPerFreq2);
#if COALESCED==0 || SINGLEPASSMODE==1 || SINGLEPASSMODE==2
computePgramReduction<<<NUMBLOCKS10, LARGEBLOCKSIZE, SMSIZE2, batchstreams[streamnum]>>>(batchWriteOffset, numThreadPerFreq2, chi0, sizeData, numFreqInBatch, &dev_smo[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset], &dev_pgram[0][0]);
#endif
#if SINGLEPASSMODE==0 && COALESCED==1
computePgramReductionCoalesced<<<NUMBLOCKS10, LARGEBLOCKSIZE, SMSIZE2, batchstreams[streamnum]>>>(batchWriteOffset, numThreadPerFreq2, chi0, sizeData,
numFreqInBatch, &dev_smo[0][streamOffset], &dev_data_sortby_argkeys[0][streamOffset], &dev_weights_sortby_argkeys[0][streamOffset], &dev_pgram[0][0]);
#endif
//Copy pgram back to host
gpuErrchk(cudaMemcpyAsync(pgram+batchWriteOffset, &dev_pgram[0][batchWriteOffset], sizeof(DTYPE)*numFreqInBatch, cudaMemcpyDeviceToHost, batchstreams[streamnum]));
// printf("\nBatch: %d, write pgram range: %d, %d ",i, batchWriteOffset,batchWriteOffset+numFreqInBatch);
} //end loop over batches
double tendmainloop=omp_get_wtime();
printf("\nTime main loop: %f",tendmainloop - tstartmainloop);
///////////////////////////////
// End main kernels
///////////////////////////////
double tstartperiod=omp_get_wtime();
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, foundPeriod);
double tendperiod=omp_get_wtime();
printf("\nObject id: %d, Found period: %f", *objectId, *foundPeriod);
printf("\nTime to compute period: %f", tendperiod - tstartperiod);
double tstartfree=omp_get_wtime();
//free device data
cudaFree(dev_pgram[0]);
cudaFree(dev_freqarr[0]);
cudaFree(dev_argkeys[0]);
cudaFree(dev_smo[0]);
cudaFree(dev_t1[0]);
cudaFree(dev_t1_sortby_argkeys[0]);
cudaFree(dev_data_sortby_argkeys[0]);
cudaFree(dev_weights_sortby_argkeys[0]);
cudaFree(dev_tt[0]);
cudaFree(dev_data[0]);
cudaFree(dev_weights[0]);
destroyStreamsOneGPU(batchstreams, NSTREAMSPERGPU, gpuid);
//free host data
free(weights);
free(tt);
double tendfree=omp_get_wtime();
printf("\nTime to free: %f", tendfree - tstartfree);
}
//GPU supersmoother with single pass
//Processes a single object potentially with multiple GPUs
void supsmu_singlepass_single_object(unsigned int * objectId, unsigned int sizeData, const DTYPE minFreq, const DTYPE maxFreq, int numFreq, DTYPE * time, DTYPE * data, DTYPE * error, DTYPE alpha, DTYPE * pgram, DTYPE * foundPeriod, double underestGPUcapacityGiB)
{
double tstartcpu=omp_get_wtime();
// int iper=1;
// DTYPE span=0.0;
//Allocate host memory
DTYPE * weights = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
DTYPE * tt = (DTYPE *)malloc(sizeof(DTYPE)*sizeData);
const DTYPE deltaf=(maxFreq-minFreq)/(numFreq*1.0);
DTYPE chi0=0;
compute_chi0_tt_weights(sizeData, time, data, error, &chi0, tt, weights);
double tendcpu=omp_get_wtime();
printf("\nCPU preamble time: %f", tendcpu - tstartcpu);
double tstartGPUPreamble=omp_get_wtime();
////////////////////////
//for batching the frequencies (not objects)
//mode=1: single pass
//singlegpuflag=0: compute all batches assuming all GPUs are used
unsigned int numBatches=computeNumBatches(1, sizeData, numFreq, underestGPUcapacityGiB, 0);
//upper limit on the number of frequencies in a batch
int numFreqPerBatch=ceil(numFreq*1.0/numBatches*1.0);
printf("\nNumber of batches: %d, Number of frequencies per batch: %d", numBatches, numFreqPerBatch);
double tstartcreatestream=omp_get_wtime();
cudaStream_t batchstreams[NSTREAMSPERGPU*NUMGPU];
createStreams(batchstreams, NUMGPU, NSTREAMSPERGPU);
double tendcreatestream=omp_get_wtime();
// printf("\nTime to create streams: %f", tendcreatestream - tstartcreatestream);
//End for batching frequencies
////////////////////////
//Device variables
int * dev_freqarr[NUMGPU];
DTYPE * dev_smo[NUMGPU];
DTYPE * dev_t1[NUMGPU];
int * dev_argkeys[NUMGPU];
DTYPE * dev_t1_sortby_argkeys[NUMGPU];
DTYPE * dev_data_sortby_argkeys[NUMGPU];
DTYPE * dev_weights_sortby_argkeys[NUMGPU];
DTYPE * dev_tt[NUMGPU];
DTYPE * dev_data[NUMGPU];
DTYPE * dev_weights[NUMGPU];
DTYPE * dev_pgram[NUMGPU];
// double tstart_GPUinit=omp_get_wtime();
// printf("\nParallelize cudaMalloc for each GPU later");
//Allocate memory and copy data to each GPU
#pragma omp parallel for num_threads(NUMGPU)
for (int i=0; i<NUMGPU; i++)
{
int globaltid=omp_get_thread_num();
int tid=globaltid%NSTREAMSPERGPU;
int gpuid=globaltid/NSTREAMSPERGPU;
int streamnum=(gpuid*NSTREAMSPERGPU)+tid;
cudaSetDevice(i);
//Those that depend on the number of frequencies (not the number per batch)
gpuErrchk(cudaMalloc((void**)&dev_pgram[i], sizeof(DTYPE)*numFreq));
//Arrays broken up into batches based on frequency
gpuErrchk(cudaMalloc((void**)&dev_freqarr[i], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_argkeys[i], sizeof(int)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_smo[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_t1[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_t1_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_data_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
gpuErrchk(cudaMalloc((void**)&dev_weights_sortby_argkeys[i], sizeof(DTYPE)*(sizeData*numFreqPerBatch*NSTREAMSPERGPU)));
//allocate on the GPU
gpuErrchk(cudaMalloc((void**)&dev_tt[i], sizeof(DTYPE)*sizeData));
gpuErrchk(cudaMalloc((void**)&dev_data[i], sizeof(DTYPE)*sizeData));
gpuErrchk(cudaMalloc((void**)&dev_weights[i], sizeof(DTYPE)*sizeData));
//copy to the GPU
gpuErrchk(cudaMemcpyAsync( dev_tt[i], tt, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[streamnum]));
gpuErrchk(cudaMemcpyAsync( dev_data[i], data, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[streamnum]));
gpuErrchk(cudaMemcpyAsync( dev_weights[i], weights, sizeof(DTYPE)*sizeData, cudaMemcpyHostToDevice, batchstreams[streamnum]));
}
// double tend_GPUinit=omp_get_wtime();
// printf("\nTime initializing al GPUs: %f", tend_GPUinit-tstart_GPUinit);
double tendGPUPreamble=omp_get_wtime();
printf("\nTime GPU preamble: %f", tendGPUPreabble - tstartGPUPreabble);
double tstartmainloop=omp_get_wtime();
//Loop over Batches
#pragma omp parallel for num_threads(NUMGPU*NSTREAMSPERGPU)
for (unsigned int i=0; i<numBatches; i++)
{
int globaltid=omp_get_thread_num();
//thread id for a single GPU
int tid=globaltid%NSTREAMSPERGPU;
int gpuid=globaltid/NSTREAMSPERGPU;
uint64_t batchWriteOffset=(uint64_t)i*(uint64_t)numFreqPerBatch;
uint64_t offsetFreqId=(uint64_t)i*(uint64_t)numFreqPerBatch;
int numFreqInBatch=numFreqPerBatch;
int streamOffset=sizeData*numFreqPerBatch*tid;
int streamnum=(gpuid*NSTREAMSPERGPU)+tid;
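//(each of the NSTREAMSPERGPU streams owns a private sizeData*numFreqPerBatch
//slice of the scratch arrays allocated above, so batches running concurrently
//in different streams never alias each other's working memory)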
cudaSetDevice(gpuid);
//the last batch may have fewer frequencies
if((numBatches!=1)&&(i==(numBatches-1)))
{
numFreqInBatch=min(numFreqInBatch,((int)numFreq)-((i)*numFreqPerBatch));
}
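//(e.g. with numFreq=1000 and numFreqPerBatch=300, the first three batches
//process 300 frequencies each and the last processes 1000-3*300 = 100)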
printf("\nglobal tid: %d, tid: %d, gpuid: %d, Stream num: %d, Batch Number: %d, number of frequencies: %d",globaltid, tid, gpuid, streamnum, i, numFreqInBatch);
// unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
// computePeriodModFOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, minFreqBatch, deltaf, &dev_t1[gpuid][streamOffset], dev_tt[gpuid]);
unsigned int NUMBLOCKSDATAFREQ=ceil((sizeData*numFreqInBatch*1.0)/LARGEBLOCKSIZE*1.0);
computePeriodModFOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, minFreq, offsetFreqId, deltaf, &dev_t1[gpuid][streamOffset], dev_tt[gpuid]);
//Initialize the key arrays
initializeKeyArraysOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset], &dev_freqarr[gpuid][streamOffset]);
//Need to do back to back sorts to sort the t1 by argkeys for each frequency
//Need 3 arrays:
//first sort the keys (argkeys) by the values (t1)
//then sort the argkeys/t1 by the freqarr
backToBackSort(&dev_argkeys[gpuid][streamOffset], &dev_freqarr[gpuid][streamOffset], &dev_t1[gpuid][streamOffset], sizeData, numFreqInBatch, batchstreams[streamnum]);
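//(sketch of one way such a segmented sort can be realized with Thrust --
//hypothetical, not necessarily what backToBackSort actually does:
// thrust::sort_by_key(thrust::cuda::par.on(stream),
//                     t1, t1+sizeData*numFreqInBatch, argkeys);
// thrust::stable_sort_by_key(thrust::cuda::par.on(stream),
//                     freqarr, freqarr+sizeData*numFreqInBatch,
//                     thrust::make_zip_iterator(thrust::make_tuple(t1, argkeys)));
//the second sort must be stable so each frequency's segment keeps the
//phase-sorted order produced by the first sort)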
//Map the keys based on argkeys
//Separate map and transform
#if COALESCED==0 || SINGLEPASSMODE==1 || SINGLEPASSMODE==2
mapUsingArgKeysOneThreadPerUpdate<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset], &dev_data[gpuid][0], &dev_weights[gpuid][0], &dev_t1[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
//combine map and transform for coalesced memory accesses for global memory kernel
#if SINGLEPASSMODE==0 && COALESCED==1
mapUsingArgKeysOneThreadPerUpdateAndReorderCoalesced<<<NUMBLOCKSDATAFREQ,LARGEBLOCKSIZE,0,batchstreams[streamnum]>>>(sizeData, numFreqInBatch, &dev_argkeys[gpuid][streamOffset],
&dev_data[gpuid][0], &dev_weights[gpuid][0],
&dev_t1[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
///////////////////////////////
// Main kernels
///////////////////////////////
//global memory only
#if SINGLEPASSMODE==0
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
#if COALESCED==0
supsmukernelSinglePassGlobalMemory<<<numBlocks,SMALLBLOCKSIZE, 0,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, alpha, &dev_smo[gpuid][streamOffset],
&dev_tt[gpuid][0], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
#if COALESCED==1
supsmukernelSinglePassGlobalMemoryCoalesced<<<numBlocks,SMALLBLOCKSIZE, 0,batchstreams[streamnum]>>>(numFreqInBatch, sizeData, alpha, &dev_smo[gpuid][streamOffset],
&dev_tt[gpuid][0], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
#endif
//One block per frequency with SM -- use small block size (e.g., 32 threads/block)
#if SINGLEPASSMODE==1
//Shared memory for x,y,z arrays
const unsigned int SMSIZE=sizeof(DTYPE)*3*sizeData;
supsmukernelSinglePassSMOneBlockPerFreq<<<numFreqInBatch,SMALLBLOCKSIZE,SMSIZE,batchstreams[streamnum]>>>(sizeData, alpha, &dev_smo[gpuid][streamOffset],
&dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
#if SINGLEPASSMODE==2
//uses one thread per frequency with shared memory for x,y,z arrays
const unsigned int numBlocks=ceil((numFreqInBatch*1.0)/(SMALLBLOCKSIZE*1.0));
const unsigned int SMSIZEDATA=sizeof(DTYPE)*3*sizeData*SMALLBLOCKSIZE;
supsmukernelSinglePassSMOneThreadPerFreq<<<numBlocks,SMALLBLOCKSIZE,SMSIZEDATA,batchstreams[streamnum]>>>(numFreqInBatch, sizeData,
alpha, &dev_smo[gpuid][streamOffset], &dev_t1_sortby_argkeys[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset],
&dev_weights_sortby_argkeys[gpuid][streamOffset]);
#endif
//Some number of threads per frequency
unsigned int numThreadPerFreq2=8; //must divide evenly into the block size
unsigned int NUMBLOCKS10=ceil((numFreqInBatch*numThreadPerFreq2*1.0)/(LARGEBLOCKSIZE*1.0));
const unsigned int SMSIZE2=sizeof(DTYPE)*(LARGEBLOCKSIZE/numThreadPerFreq2);
#if COALESCED==0 || SINGLEPASSMODE==1 || SINGLEPASSMODE==2
computePgramReduction<<<NUMBLOCKS10, LARGEBLOCKSIZE, SMSIZE2, batchstreams[streamnum]>>>(batchWriteOffset, numThreadPerFreq2, chi0, sizeData, numFreqInBatch, &dev_smo[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset], &dev_pgram[gpuid][0]);
#endif
#if SINGLEPASSMODE==0 && COALESCED==1
computePgramReductionCoalesced<<<NUMBLOCKS10, LARGEBLOCKSIZE, SMSIZE2, batchstreams[streamnum]>>>(batchWriteOffset, numThreadPerFreq2, chi0, sizeData,
numFreqInBatch, &dev_smo[gpuid][streamOffset], &dev_data_sortby_argkeys[gpuid][streamOffset], &dev_weights_sortby_argkeys[gpuid][streamOffset], &dev_pgram[gpuid][0]);
#endif
//Copy pgram back to host
gpuErrchk(cudaMemcpyAsync(pgram+batchWriteOffset, &dev_pgram[gpuid][batchWriteOffset], sizeof(DTYPE)*numFreqInBatch, cudaMemcpyDeviceToHost, batchstreams[streamnum]));
// printf("\nBatch: %d, write pgram range: %d, %d ",i, batchWriteOffset,batchWriteOffset+numFreqInBatch);
} //end loop over batches
double tendmainloop=omp_get_wtime();
printf("\nTime main loop: %f",tendmainloop - tstartmainloop);
///////////////////////////////
// End main kernels
///////////////////////////////
double tstartperiod=omp_get_wtime();
computePeriodSuperSmoother(pgram, numFreq, minFreq, maxFreq, foundPeriod);
double tendperiod=omp_get_wtime();
printf("\nFound period: %f", *foundPeriod);
printf("\nTime to compute period: %f", tendperiod - tstartperiod);
double tstartfree=omp_get_wtime();
//free device data
#pragma omp parallel for num_threads(NUMGPU)
for (int i=0; i<NUMGPU; i++)
{
cudaFree(dev_pgram[i]);
cudaFree(dev_freqarr[i]);
cudaFree(dev_argkeys[i]);
cudaFree(dev_smo[i]);
cudaFree(dev_t1[i]);
cudaFree(dev_t1_sortby_argkeys[i]);
cudaFree(dev_data_sortby_argkeys[i]);
cudaFree(dev_weights_sortby_argkeys[i]);
cudaFree(dev_tt[i]);
cudaFree(dev_data[i]);
cudaFree(dev_weights[i]);
}
//free host data
free(weights);
free(tt);
double tendfree=omp_get_wtime();
printf("\nTime to free: %f", tendfree - tstartfree);
}
//Estimated memory footprint, used to compute the number of batches
//mode-0 is original
//mode-1 is single pass
//pass in the underestimated capacity
//singlegpuflag- 0- use NUMGPU GPUs
//singlegpuflag- 1- use 1 GPU
unsigned int computeNumBatches(bool mode, unsigned int sizeData, unsigned int numFreq, double underestGPUcapacityGiB, bool singlegpuflag)
{
printf("\n*********************");
//Memory footprint assuming FP64 data
//Single pass: sp=[1/(1024**3)]*[(8*Nf)+(3*8*Nt)+(2*4*Nf*Nt)+(5*8*Nf*Nt)+(2*3*8*Nf*Nt)]
//original: sp+[1/(1024**3)]*(8*8*Nf*Nt) (scratch for the original algorithm)
double totalGiB=0.0;
//pgram
totalGiB+=sizeof(DTYPE)*numFreq/(1024*1024*1024.0);
//tt, data, weights
totalGiB+=3*sizeof(DTYPE)*sizeData/(1024*1024*1024.0);
//freqArr, argkeys
totalGiB+=2*sizeof(int)*numFreq*sizeData/(1024*1024*1024.0);
//smo, t1, t1_sortby_argkeys, data_sortby_argkeys, weights_sortby_argkeys,
totalGiB+=5*sizeof(DTYPE)*numFreq*sizeData/(1024*1024*1024.0);
// sorting (out-of-place radix sorting requires an extra n storage, but overestimate 2n because back-to-back may require 2n)
totalGiB+=2*3*sizeof(DTYPE)*numFreq*sizeData/(1024*1024*1024.0);
//account for scratch in original algorithm
if (mode==0)
{
totalGiB+=sizeof(DTYPE)*numFreq*sizeData*8/(1024*1024*1024.0);
}
printf("\nEstimated global memory footprint (GiB): %f", totalGiB);
unsigned int numBatches=ceil(totalGiB/(underestGPUcapacityGiB))*NSTREAMSPERGPU;
printf("\nMinimum number of batches: %u", numBatches);
if (singlegpuflag==0)
{
numBatches=ceil((numBatches*1.0/NUMGPU))*NUMGPU;
printf("\nNumber of batches (after ensuring batches evenly divide %d GPUs): %u", NUMGPU, numBatches);
}
else
{
printf("\nNumber of batches (after ensuring batches evenly divide 1 GPUs): %u", numBatches);
}
printf("\n*********************\n");
return numBatches;
}
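//(worked example, assuming FP64 and sizeof(int)=4: for Nt=1000 points and
//Nf=100000 frequencies the terms above give ~8.0e5 + 2.4e4 + 8.0e8 + 4.0e9
//+ 4.8e9 bytes ~= 8.94 GiB in single-pass mode, so with an underestimated
//capacity of 10 GiB this yields ceil(8.94/10)=1, i.e. NSTREAMSPERGPU batches)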
void outputPgramToFile(struct lookupObj * objectLookup, unsigned int numUniqueObjects, unsigned int numFreqs, DTYPE ** pgram)
{
char fnameoutput[]="pgram_SS.txt";
printf("\nPrinting the pgram to file: %s", fnameoutput);
ofstream pgramoutput;
pgramoutput.open(fnameoutput,ios::out);
pgramoutput.precision(4);
for (unsigned int i=0; i<numUniqueObjects; i++)
{
pgramoutput<<objectLookup[i].objId<<", ";
for (unsigned int j=0; j<numFreqs; j++)
{
pgramoutput<<(*pgram)[(i*numFreqs)+j]<<", ";
}
pgramoutput<<endl;
}
pgramoutput.close();
}
void outputPeriodsToFile(struct lookupObj * objectLookup, unsigned int numUniqueObjects, DTYPE * foundPeriod)
{
char fnamebestperiods[]="bestperiods_SS.txt";
printf("\nPrinting the best periods to file: %s", fnamebestperiods);
ofstream bestperiodsoutput;
bestperiodsoutput.open(fnamebestperiods,ios::out);
bestperiodsoutput.precision(7);
for (unsigned int i=0; i<numUniqueObjects; i++)
{
bestperiodsoutput<<objectLookup[i].objId<<", "<<foundPeriod[i]<<endl;
}
bestperiodsoutput.close();
}
void outputPeriodsToStdout(struct lookupObj * objectLookup, unsigned int numUniqueObjects, DTYPE * foundPeriod)
{
for (unsigned int i=0; i<numUniqueObjects; i++)
{
printf("\nObject: %d Period: %f, ",objectLookup[i].objId,foundPeriod[i]);
}
}
|
5143c7162f08c96a986a2691e800c7926be06261.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void lhkde ( const int n, const float *a, const float *b, float *l, float *h ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < n ) {
l[i] = a[i] - 3 * b[i];
h[i] = a[i] + 3 * b[i];
}
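//(presumably the usual mean +/- 3*bandwidth support bounds for a kernel
//density estimate, covering essentially all of a Gaussian kernel's mass)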
} | 5143c7162f08c96a986a2691e800c7926be06261.cu | #include "includes.h"
__global__ void lhkde ( const int n, const float *a, const float *b, float *l, float *h ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < n ) {
l[i] = a[i] - 3 * b[i];
h[i] = a[i] + 3 * b[i];
}
} |
74b2a536f0abab278cca42c2e46b08ddb0f2ec93.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "addVectors.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int entries = 1;
const float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
float *ab = NULL;
hipMalloc(&ab, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
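//(the two loops above round the sizes up to multiples of the block
//dimensions; equivalently: iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX)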
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
addVectors), dim3(gridBlock),dim3(threadBlock), 0, 0, entries,a,b,ab);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
addVectors), dim3(gridBlock),dim3(threadBlock), 0, 0, entries,a,b,ab);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
addVectors), dim3(gridBlock),dim3(threadBlock), 0, 0, entries,a,b,ab);
}
auto end = steady_clock::now();
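//(caveat: kernel launches are asynchronous, and there is no
//hipDeviceSynchronize() before 'end', so this interval may mostly measure
//launch overhead rather than kernel execution time)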
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 74b2a536f0abab278cca42c2e46b08ddb0f2ec93.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "addVectors.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int entries = 1;
const float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
float *ab = NULL;
cudaMalloc(&ab, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
addVectors<<<gridBlock,threadBlock>>>(entries,a,b,ab);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
addVectors<<<gridBlock,threadBlock>>>(entries,a,b,ab);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
addVectors<<<gridBlock,threadBlock>>>(entries,a,b,ab);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fd541899d84500dddb4028dab33ede1b4fc84ad4.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <iostream>
#include <helper_cuda.h>
#include <sstream>
#include "../caps.h"
#include "../debug.h"
#define tst_flprintf( format, ...) printf ( "[d]%s:%d %s " format, __FILE__, __LINE__, __func__, __VA_ARGS__)
#define tst_prlocf(exp) printf( "[d]" __FILE__ "(%d): " exp, __LINE__)
__constant__ uint tst_debugFlags;
uint h_debugFlags;
using std::string;
using std::stringstream;
using std::cout;
inline __host__ __device__ bool tst_checkDebug(uint flags) {
#ifndef __CUDA_ARCH__
return h_debugFlags & flags;
#else
//#ifdef CuMatrix_DebugBuild
return tst_debugFlags & flags;
//#else
// return false;
//#endif
#endif
}
void _setCurrGpuDebugFlags(uint flags, bool orThem, bool andThem, hipStream_t stream ) {
uint curr = flags;
if(orThem) {
tst_prlocf("copying DebugFlag fr device for or'n...\n");
checkCudaErrors(hipMemcpyFromSymbol(&curr, tst_debugFlags,sizeof(uint)));
curr |= flags;
} else if(andThem) {
tst_prlocf("copying DebugFlag fr device fur and'n...\n");
checkCudaErrors(hipMemcpyFromSymbol(&curr, tst_debugFlags,sizeof(uint)));
curr &= flags;
}
tst_prlocf("copying DebugFlag to device...\n");
checkCudaErrors(hipMemcpyToSymbolAsync(tst_debugFlags,&curr,sizeof(uint),0, hipMemcpyHostToDevice, stream));
tst_prlocf("copied to device\n");
h_debugFlags = curr;
}
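//(note: __constant__ symbols such as tst_debugFlags are per-device, so the
//copy above only affects the currently selected GPU; _setAllGpuDebugFlags
//below therefore selects each device in turn before calling this)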
void _setAllGpuDebugFlags(uint flags, bool orThem, bool andThem ) {
tst_prlocf("_setAllGpuDebugFlags entre...\n");
int devCount, currDev;
checkCudaErrors(hipGetDeviceCount(&devCount));
checkCudaErrors(hipGetDevice(&currDev));
tst_flprintf("device count %d\n",devCount);
tst_flprintf("curr device %d\n",currDev);
hipStream_t *streams = (hipStream_t *) malloc(
devCount * sizeof(hipStream_t));
for(int i = 0; i < devCount;i++) {
if(strstr("gtx980m", "750 Ti")) {
tst_prlocf("not skipping sluggish 750 ti\n");
//continue;
}
tst_flprintf("setting DbugFlags for device %s %d\n","gtx980m",i);
ExecCaps_visitDevice(i);
tst_flprintf("set device %d\n",i);
checkCudaErrors(hipStreamCreateWithFlags(&(streams[i]), hipStreamNonBlocking));
tst_prlocf("create stream\n");
_setCurrGpuDebugFlags(flags,orThem,andThem, streams[i]);
tst_prlocf("set gpu dbg flags\n");
}
for(int i = 0; i < devCount; i++) {
tst_flprintf("synching stream for dev %d\n",i);
checkCudaErrors(hipStreamSynchronize(streams[i]));
checkCudaErrors(hipStreamDestroy(streams[i]));
}
ExecCaps_setDevice(currDev);
}
__host__ __device__ void tst_expNotation(char* buff, long val) {
double factor = 1.;
if (val >= Giga) {
factor = 1. / Giga;
#ifndef __CUDA_ARCH__
sprintf(buff, "%2.3gGb", val * factor);
#endif
} else if (val >= Mega) {
factor = 1. / Mega;
#ifndef __CUDA_ARCH__
sprintf(buff, "%2.3gMb", val * factor);
#endif
} else if (val >= Kilo) {
factor = 1. / Kilo;
#ifndef __CUDA_ARCH__
sprintf(buff, "%2.3gKb", val * factor);
#endif
} else {
#ifndef __CUDA_ARCH__
sprintf(buff, "%2.3gb", val * factor);
#endif
}
}
string tst_expNotation(long val) {
char buff[256];
tst_expNotation(buff, val);
stringstream ss;
ss << buff;
return ss.str();
}
__host__ void _allGpuMem(size_t* free, size_t* total) {
int orgDev;
checkCudaErrors(hipGetDevice(&orgDev));
*free=0;
*total=0;
size_t lFree = 0, lTotal = 0;
int devCnt;
char buff[20];
checkCudaErrors(hipGetDeviceCount(&devCnt));
cout << " ";
assert(false);
for(int i = 0; i < devCnt;i++) {
ExecCaps_visitDevice(i);
checkCudaErrors(hipMemGetInfo(&lFree, &lTotal));
*free += lFree;
*total += lTotal;
sprintf(buff, " (%.2f%% used)", 100 * (1 - lFree * 1. / lTotal));
if(tst_checkDebug(tst_debugFlags)) cout << "[" << i <<": " << tst_expNotation(lFree) << " free /" << tst_expNotation(lTotal) << buff<< "] ";
}
cout << endl;
ExecCaps_setDevice(orgDev);
}
double _usedMemRatio(bool allDevices) {
size_t freeMemory, totalMemory;
if(allDevices)
_allGpuMem(&freeMemory, &totalMemory);
else {
assert(false);
cout << "calling hipMemGetInfo\n";
hipMemGetInfo(&freeMemory, &totalMemory);
cout << "callied hipMemGetInfo\n";
}
int currDev;
checkCudaErrors(hipGetDevice(&currDev));
if (tst_debugFlags) {
if(allDevices )
cout << "\tallDev freeMemory " << freeMemory << ", total " << totalMemory << "\n";
else
cout << "\tdev " << currDev<< " freeMemory " << freeMemory << ", total " << totalMemory << "\n";
}
return 100 * (1 - freeMemory * 1. / totalMemory);
}
void _usedDmem(bool allDevices) {
cout << "Memory " << _usedMemRatio(allDevices) << "% used\n";
}
int dmain(int argc, const char **argv) {
uint localDbgFlags = 1 << 2 | 1 << 5;
tst_flprintf("localDbgFlags %d\n",localDbgFlags);
_usedDmem(true);
_setAllGpuDebugFlags(localDbgFlags,false,false);
tst_prlocf("set debug flags\n");
_usedDmem(true);
}
| fd541899d84500dddb4028dab33ede1b4fc84ad4.cu | #include <assert.h>
#include <iostream>
#include <helper_cuda.h>
#include <sstream>
#include "../caps.h"
#include "../debug.h"
#define tst_flprintf( format, ...) printf ( "[d]%s:%d %s " format, __FILE__, __LINE__, __func__, __VA_ARGS__)
#define tst_prlocf(exp) printf( "[d]" __FILE__ "(%d): " exp, __LINE__)
__constant__ uint tst_debugFlags;
uint h_debugFlags;
using std::string;
using std::stringstream;
using std::cout;
inline __host__ __device__ bool tst_checkDebug(uint flags) {
#ifndef __CUDA_ARCH__
return h_debugFlags & flags;
#else
//#ifdef CuMatrix_DebugBuild
return tst_debugFlags & flags;
//#else
// return false;
//#endif
#endif
}
void _setCurrGpuDebugFlags(uint flags, bool orThem, bool andThem, cudaStream_t stream ) {
uint curr = flags;
if(orThem) {
tst_prlocf("copying DebugFlag fr device for or'n...\n");
checkCudaErrors(cudaMemcpyFromSymbol(&curr, tst_debugFlags,sizeof(uint)));
curr |= flags;
} else if(andThem) {
tst_prlocf("copying DebugFlag fr device fur and'n...\n");
checkCudaErrors(cudaMemcpyFromSymbol(&curr, tst_debugFlags,sizeof(uint)));
curr &= flags;
}
tst_prlocf("copying DebugFlag to device...\n");
checkCudaErrors(cudaMemcpyToSymbolAsync(tst_debugFlags,&curr,sizeof(uint),0, cudaMemcpyHostToDevice, stream));
tst_prlocf("copied to device\n");
h_debugFlags = curr;
}
void _setAllGpuDebugFlags(uint flags, bool orThem, bool andThem ) {
tst_prlocf("_setAllGpuDebugFlags entre...\n");
int devCount, currDev;
checkCudaErrors(cudaGetDeviceCount(&devCount));
checkCudaErrors(cudaGetDevice(&currDev));
tst_flprintf("device count %d\n",devCount);
tst_flprintf("curr device %d\n",currDev);
cudaStream_t *streams = (cudaStream_t *) malloc(
devCount * sizeof(cudaStream_t));
for(int i = 0; i < devCount;i++) {
if(strstr("gtx980m", "750 Ti")) {
tst_prlocf("not skipping sluggish 750 ti\n");
//continue;
}
tst_flprintf("setting DbugFlags for device %s %d\n","gtx980m",i);
ExecCaps_visitDevice(i);
tst_flprintf("set device %d\n",i);
checkCudaErrors(cudaStreamCreateWithFlags(&(streams[i]), cudaStreamNonBlocking));
tst_prlocf("create stream\n");
_setCurrGpuDebugFlags(flags,orThem,andThem, streams[i]);
tst_prlocf("set gpu dbg flags\n");
}
for(int i = 0; i < devCount; i++) {
tst_flprintf("synching stream for dev %d\n",i);
checkCudaErrors(cudaStreamSynchronize(streams[i]));
checkCudaErrors(cudaStreamDestroy(streams[i]));
}
ExecCaps_setDevice(currDev);
}
__host__ __device__ void tst_expNotation(char* buff, long val) {
double factor = 1.;
if (val >= Giga) {
factor = 1. / Giga;
#ifndef __CUDA_ARCH__
sprintf(buff, "%2.3gGb", val * factor);
#endif
} else if (val >= Mega) {
factor = 1. / Mega;
#ifndef __CUDA_ARCH__
sprintf(buff, "%2.3gMb", val * factor);
#endif
} else if (val >= Kilo) {
factor = 1. / Kilo;
#ifndef __CUDA_ARCH__
sprintf(buff, "%2.3gKb", val * factor);
#endif
} else {
#ifndef __CUDA_ARCH__
sprintf(buff, "%2.3gb", val * factor);
#endif
}
}
string tst_expNotation(long val) {
char buff[256];
tst_expNotation(buff, val);
stringstream ss;
ss << buff;
return ss.str();
}
__host__ void _allGpuMem(size_t* free, size_t* total) {
int orgDev;
checkCudaErrors(cudaGetDevice(&orgDev));
*free=0;
*total=0;
size_t lFree = 0, lTotal = 0;
int devCnt;
char buff[20];
checkCudaErrors(cudaGetDeviceCount(&devCnt));
cout << " ";
assert(false);
for(int i = 0; i < devCnt;i++) {
ExecCaps_visitDevice(i);
checkCudaErrors(cudaMemGetInfo(&lFree, &lTotal));
*free += lFree;
*total += lTotal;
sprintf(buff, " (%.2f%% used)", 100 * (1 - lFree * 1. / lTotal));
if(tst_checkDebug(tst_debugFlags)) cout << "[" << i <<": " << tst_expNotation(lFree) << " free /" << tst_expNotation(lTotal) << buff<< "] ";
}
cout << endl;
ExecCaps_setDevice(orgDev);
}
double _usedMemRatio(bool allDevices) {
size_t freeMemory, totalMemory;
if(allDevices)
_allGpuMem(&freeMemory, &totalMemory);
else {
assert(false);
cout << "calling cudaMemGetInfo\n";
cudaMemGetInfo(&freeMemory, &totalMemory);
cout << "callied cudaMemGetInfo\n";
}
int currDev;
checkCudaErrors(cudaGetDevice(&currDev));
if (tst_debugFlags) {
if(allDevices )
cout << "\tallDev freeMemory " << freeMemory << ", total " << totalMemory << "\n";
else
cout << "\tdev " << currDev<< " freeMemory " << freeMemory << ", total " << totalMemory << "\n";
}
return 100 * (1 - freeMemory * 1. / totalMemory);
}
void _usedDmem(bool allDevices) {
cout << "Memory " << _usedMemRatio(allDevices) << "% used\n";
}
int dmain(int argc, const char **argv) {
uint localDbgFlags = 1 << 2 | 1 << 5;
tst_flprintf("localDbgFlags %d\n",localDbgFlags);
_usedDmem(true);
_setAllGpuDebugFlags(localDbgFlags,false,false);
tst_prlocf("set debug flags\n");
_usedDmem(true);
}
|
2332e0f4122110cc73f52fff5efa2f579f68a965.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_18.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
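//(Cai and CaSR above are the positive roots of the quadratics that the
//rapid-buffering approximation yields for the free concentration given the
//total calcium, i.e. x = (-b + sqrt(b*b + 4*c))/2 for x*x + b*x - c = 0)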
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
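//(the gate updates above use the Rush-Larsen scheme,
// y(t+dt) = y_inf - (y_inf - y(t))*exp(-dt/tau_y),
//which integrates dy/dt = (y_inf - y)/tau_y exactly while y_inf and tau_y
//are held fixed over the step)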
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| 2332e0f4122110cc73f52fff5efa2f579f68a965.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_18.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
a1db6974bd5fa5d66375066c04caf9f0d9865461.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convert_float2bgr.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *annd = NULL;
hipMalloc(&annd, XSIZE*YSIZE*sizeof(float));
unsigned char *bgr = NULL;
hipMalloc(&bgr, XSIZE*YSIZE);
int w = XSIZE;
int h = YSIZE;
float minval = 1;
float maxval = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
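// round the launch extents up to multiples of the block dimensions so the grid tiles them exactly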
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
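// hipFree(0) triggers lazy runtime/context initialization before the first launch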
hipFree(0);
hipLaunchKernelGGL(convert_float2bgr, dim3(gridBlock), dim3(threadBlock), 0, 0, annd, bgr, w, h, minval, maxval);
hipDeviceSynchronize();
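// warm-up launches (untimed)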
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(convert_float2bgr, dim3(gridBlock), dim3(threadBlock), 0, 0, annd, bgr, w, h, minval, maxval);
}
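// timed region: total time for 1000 launches, reported below in microseconds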
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(convert_float2bgr, dim3(gridBlock), dim3(threadBlock), 0, 0, annd, bgr, w, h, minval, maxval);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a1db6974bd5fa5d66375066c04caf9f0d9865461.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convert_float2bgr.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *annd = NULL;
cudaMalloc(&annd, XSIZE*YSIZE*sizeof(float));
unsigned char *bgr = NULL;
cudaMalloc(&bgr, XSIZE*YSIZE);
int w = XSIZE;
int h = YSIZE;
float minval = 1;
float maxval = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
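// round the launch extents up to multiples of the block dimensions so the grid tiles them exactly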
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
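// cudaFree(0) triggers lazy context initialization before the first launch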
cudaFree(0);
convert_float2bgr<<<gridBlock,threadBlock>>>(annd,bgr,w,h,minval,maxval);
cudaDeviceSynchronize();
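// warm-up launches (untimed)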
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convert_float2bgr<<<gridBlock,threadBlock>>>(annd,bgr,w,h,minval,maxval);
}
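// timed region: total time for 1000 launches, reported below in microseconds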
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convert_float2bgr<<<gridBlock,threadBlock>>>(annd,bgr,w,h,minval,maxval);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fec76d264704024c38af75a80a1867780d4bf2d8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
void print_array(float *A, int N)
{
for(int i=0;i<N;i++)
printf("%.2f ",A[i]);
printf("\n");
}
__global__ void
process_kernel1(float *input1, float *input2, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//compute the global linear index i from the 3D grid and block coordinates
int blockNum = blockIdx.z * ( gridDim.x * gridDim.y ) + blockIdx.y * gridDim.x +blockIdx.x ;
int threadNum = threadIdx.z * ( blockDim.x * blockDim.y ) + threadIdx.y * ( blockDim.x ) + threadIdx.x ;
int i = blockNum * ( blockDim.x * blockDim.y * blockDim.z ) + threadNum;
if (i < numElements)
{
output[i] = sinf(input1[i]) + cosf(input2[i]);
}
}
__global__ void
process_kernel2(float *input, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//compute the global linear index i from the 3D grid and block coordinates
int blockNum = blockIdx.z * ( gridDim.x * gridDim.y ) + blockIdx.y * gridDim.x +blockIdx.x ;
int threadNum = threadIdx.z * ( blockDim.x * blockDim.y ) + threadIdx.y * ( blockDim.x ) + threadIdx.x ;
int i = blockNum * ( blockDim.x * blockDim.y * blockDim.z ) + threadNum;
if (i < numElements)
{
output[i] = logf(input[i]);
}
}
__global__ void
process_kernel3(float *input, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//compute the global linear index i from the 3D grid and block coordinates
int blockNum = blockIdx.z * ( gridDim.x * gridDim.y ) + blockIdx.y * gridDim.x +blockIdx.x ;
int threadNum = threadIdx.z * ( blockDim.x * blockDim.y ) + threadIdx.y * ( blockDim.x ) + threadIdx.x ;
int i = blockNum * ( blockDim.x * blockDim.y * blockDim.z ) + threadNum;
if (i < numElements)
{
output[i] = sqrtf(input[i]);
}
}
int main(void)
{
hipError_t err = hipSuccess;
int numElements = 16384;
size_t size = numElements * sizeof(float);
float *h_input1 = (float *)malloc(size);
float *h_input2 = (float *)malloc(size);
float *h_output1 = (float *)malloc(size);
float *h_output2 = (float *)malloc(size);
float *h_output3 = (float *)malloc(size);
if (h_input1 == NULL || h_input2 == NULL || h_output1 == NULL || h_output2 == NULL || h_output3 == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < numElements; ++i)
{
scanf("%f",&h_input1[i]);
}
for (int i = 0; i < numElements; ++i)
{
scanf("%f",&h_input2[i]);
}
float *d_input1 = NULL;
err = hipMalloc((void **)&d_input1, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_input1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_input2 = NULL;
err = hipMalloc((void **)&d_input2, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_input2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output1 = NULL;
err = hipMalloc((void **)&d_output1, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector h_output1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output2 = NULL;
err = hipMalloc((void **)&d_output2, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector h_output2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output3 = NULL;
err = hipMalloc((void **)&d_output3, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector h_output3 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_input1, h_input1, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector h_input1 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_input2, h_input2, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector h_input2 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel1
dim3 threadsPerBlock1 (4 , 2 , 2) ;
dim3 blocksPerGrid1 (32,32,1);
hipLaunchKernelGGL(( process_kernel1), dim3(blocksPerGrid1), dim3(threadsPerBlock1), 0, 0, d_input1, d_input2, d_output1, size);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel2
dim3 threadsPerBlock2 (2 , 8 , 1) ;
dim3 blocksPerGrid2 (8,8,16);
hipLaunchKernelGGL(( process_kernel2), dim3(blocksPerGrid2), dim3(threadsPerBlock2), 0, 0, d_output1, d_output2, size);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch process_kernel2 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel3
dim3 threadsPerBlock3 (16 , 1 , 1) ;
dim3 blocksPerGrid3 (128,8,1);
hipLaunchKernelGGL(( process_kernel3), dim3(blocksPerGrid3), dim3(threadsPerBlock3), 0, 0, d_output2, d_output3, size);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch process_kernel3 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_output1, d_output1, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector d_output1 from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_output2, d_output2, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector d_output2 from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_output3, d_output3, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector d_output3 from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vectors are as expected
for (int i = 0; i < numElements; ++i)
{
//printf( "element %d! value %f %f %f %f \n", i, h_input1[i], h_input2[i], sinf(h_input1[i]) + cosf(h_input2[i]), h_output1[i]);
if (fabs(sinf(h_input1[i]) + cosf(h_input2[i]) - h_output1[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output1 failed at element %d! value %f %f %f \n", i, h_input1[i], h_input2[i], h_output1[i]);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; ++i)
{
if (fabs(logf(h_output1[i]) - h_output2[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output2 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; ++i)
{
if (fabs(sqrtf(h_output2[i]) - h_output3[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output3 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
print_array(h_output3,numElements);
err = hipFree(d_input1);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_input1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_input2);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_input2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_output1);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_output1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_output2);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_output2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_output3);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_output3 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
free(h_input1);
free(h_input2);
free(h_output1);
free(h_output2);
free(h_output3);
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
| fec76d264704024c38af75a80a1867780d4bf2d8.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
void print_array(float *A, int N)
{
for(int i=0;i<N;i++)
printf("%.2f ",A[i]);
printf("\n");
}
__global__ void
process_kernel1(float *input1, float *input2, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//compute the global linear index i from the 3D grid and block coordinates
int blockNum = blockIdx.z * ( gridDim.x * gridDim.y ) + blockIdx.y * gridDim.x +blockIdx.x ;
int threadNum = threadIdx.z * ( blockDim.x * blockDim.y ) + threadIdx.y * ( blockDim.x ) + threadIdx.x ;
int i = blockNum * ( blockDim.x * blockDim.y * blockDim.z ) + threadNum;
if (i < numElements)
{
output[i] = sinf(input1[i]) + cosf(input2[i]);
}
}
__global__ void
process_kernel2(float *input, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//compute the global linear index i from the 3D grid and block coordinates
int blockNum = blockIdx.z * ( gridDim.x * gridDim.y ) + blockIdx.y * gridDim.x +blockIdx.x ;
int threadNum = threadIdx.z * ( blockDim.x * blockDim.y ) + threadIdx.y * ( blockDim.x ) + threadIdx.x ;
int i = blockNum * ( blockDim.x * blockDim.y * blockDim.z ) + threadNum;
if (i < numElements)
{
output[i] = logf(input[i]);
}
}
__global__ void
process_kernel3(float *input, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//compute the global linear index i from the 3D grid and block coordinates
int blockNum = blockIdx.z * ( gridDim.x * gridDim.y ) + blockIdx.y * gridDim.x +blockIdx.x ;
int threadNum = threadIdx.z * ( blockDim.x * blockDim.y ) + threadIdx.y * ( blockDim.x ) + threadIdx.x ;
int i = blockNum * ( blockDim.x * blockDim.y * blockDim.z ) + threadNum;
if (i < numElements)
{
output[i] = sqrtf(input[i]);
}
}
int main(void)
{
cudaError_t err = cudaSuccess;
int numElements = 16384;
size_t size = numElements * sizeof(float);
float *h_input1 = (float *)malloc(size);
float *h_input2 = (float *)malloc(size);
float *h_output1 = (float *)malloc(size);
float *h_output2 = (float *)malloc(size);
float *h_output3 = (float *)malloc(size);
if (h_input1 == NULL || h_input2 == NULL || h_output1 == NULL || h_output2 == NULL || h_output3 == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < numElements; ++i)
{
scanf("%f",&h_input1[i]);
}
for (int i = 0; i < numElements; ++i)
{
scanf("%f",&h_input2[i]);
}
float *d_input1 = NULL;
err = cudaMalloc((void **)&d_input1, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_input1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_input2 = NULL;
err = cudaMalloc((void **)&d_input2, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_input2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output1 = NULL;
err = cudaMalloc((void **)&d_output1, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector h_output1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output2 = NULL;
err = cudaMalloc((void **)&d_output2, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector h_output2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output3 = NULL;
err = cudaMalloc((void **)&d_output3, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector h_output3 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_input1, h_input1, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector h_input1 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_input2, h_input2, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector h_input2 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel1
dim3 threadsPerBlock1 (4 , 2 , 2) ;
dim3 blocksPerGrid1 (32,32,1);
process_kernel1<<<blocksPerGrid1, threadsPerBlock1>>>(d_input1, d_input2, d_output1, size);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel2
dim3 threadsPerBlock2 (2 , 8 , 1) ;
dim3 blocksPerGrid2 (8,8,16);
process_kernel2<<<blocksPerGrid2, threadsPerBlock2>>>(d_output1, d_output2, size);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch process_kernel2 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel3
dim3 threadsPerBlock3 (16 , 1 , 1) ;
dim3 blocksPerGrid3 (128,8,1);
process_kernel3<<<blocksPerGrid3, threadsPerBlock3>>>(d_output2, d_output3, size);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch process_kernel3 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_output1, d_output1, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector d_output1 from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_output2, d_output2, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector d_output2 from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_output3, d_output3, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector d_output3 from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vectors are as expected
for (int i = 0; i < numElements; ++i)
{
//printf( "element %d! value %f %f %f %f \n", i, h_input1[i], h_input2[i], sinf(h_input1[i]) + cosf(h_input2[i]), h_output1[i]);
if (fabs(sinf(h_input1[i]) + cosf(h_input2[i]) - h_output1[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output1 failed at element %d! value %f %f %f \n", i, h_input1[i], h_input2[i], h_output1[i]);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; ++i)
{
if (fabs(logf(h_output1[i]) - h_output2[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output2 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; ++i)
{
if (fabs(sqrtf(h_output2[i]) - h_output3[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output3 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
print_array(h_output3,numElements);
err = cudaFree(d_input1);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_input1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_input2);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_input2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output1);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_output1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output2);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_output2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output3);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_output3 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(h_input1);
free(h_input2);
free(h_output1);
free(h_output2);
free(h_output3);
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
|
bd2cefeca2dc3469f315ef54f52337bd589e8431.hip | // !!! This is a file automatically generated by hipify!!!
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/cuda_stream_accessor.hpp"
#include "opencv2/core/cuda/common.hpp"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include "Warp/ZReproject.h"
typedef double CalcType;
struct CudaRemapParam
{
enum ImageType
{
ImageTypeRectlinear = 0,
ImageTypeFullFrameFishEye = 1,
ImageTypeDrumFishEye = 2,
ImageTypeCircularFishEye = 3
};
CalcType srcTX, srcTY;
CalcType destTX, destTY;
CalcType scale[2];
CalcType shear[2];
CalcType rot[2];
void *perspect[2];
CalcType rad[6];
CalcType mt[3][3];
CalcType distance;
CalcType horizontal;
CalcType vertical;
CalcType PI;
CalcType cropX;
CalcType cropY;
CalcType cropWidth;
CalcType cropHeight;
CalcType centx;
CalcType centy;
CalcType sqrDist;
int imageType;
};
void copyParam(const Remap& src, CudaRemapParam& dst, CalcType x, CalcType y,
CalcType width, CalcType height, CalcType centx, CalcType centy, CalcType sqrDist, int type)
{
dst.srcTX = src.srcTX;
dst.srcTY = src.srcTY;
dst.destTX = src.destTX;
dst.destTY = src.destTY;
dst.scale[0] = src.mp.scale[0];
dst.scale[1] = src.mp.scale[1];
dst.shear[0] = src.mp.shear[0];
dst.shear[1] = src.mp.shear[1];
dst.rot[0] = src.mp.rot[0];
dst.rot[1] = src.mp.rot[1];
dst.perspect[0] = src.mp.perspect[0];
dst.perspect[1] = src.mp.perspect[1];
for (int i = 0; i < 6; i++)
dst.rad[i] = src.mp.rad[i];
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
dst.mt[i][j] = src.mp.mt[i][j];
}
dst.distance = src.mp.distance;
dst.horizontal = src.mp.horizontal;
dst.vertical = src.mp.vertical;
dst.PI = 3.1415926535898;
dst.cropX = x;
dst.cropY = y;
dst.cropWidth = width;
dst.cropHeight = height;
dst.centx = centx;
dst.centy = centy;
dst.sqrDist = sqrDist;
dst.imageType = type;
}
__constant__ CudaRemapParam param;
__device__ void dstToSrc(float* srcx, float* srcy, int dstx, int dsty, int mapWidth, int mapHeight)
{
if (dstx >= mapWidth || dsty >= mapHeight)
return;
CalcType x_src = dstx, y_src = dsty;
x_src -= param.srcTX - 0.5;
y_src -= param.srcTY - 0.5;
CalcType tx_dest, ty_dest;
//rotate_erect
tx_dest = x_src + param.rot[1];
while (tx_dest < -param.rot[0])
tx_dest += 2 * param.rot[0];
while (tx_dest > param.rot[0])
tx_dest -= 2 * param.rot[0];
ty_dest = y_src;
x_src = tx_dest;
y_src = ty_dest;
//sphere_tp_erect: convert spherical coordinates to world coordinates
CalcType phi, theta, r;
CalcType v[3];
phi = x_src / param.distance; //
theta = -y_src / param.distance + param.PI / 2; //
if (theta < 0)
{
theta = -theta;
phi += param.PI;
}
if (theta > param.PI)
{
theta = param.PI - (theta - param.PI);
phi += param.PI;
}
v[0] = sin(theta) * sin(phi);
v[1] = cos(theta);
v[2] = sin(theta) * cos(phi);
//camera extrinsic parameters
CalcType v0 = v[0];
CalcType v1 = v[1];
CalcType v2 = v[2];
for (int i = 0; i<3; i++)
{
v[i] = param.mt[0][i] * v0 + param.mt[1][i] * v1 + param.mt[2][i] * v2;
}
r = sqrt(v[0] * v[0] + v[1] * v[1]);
if (r == 0.0)
theta = 0.0;
else
theta = param.distance * atan2(r, v[2]) / r;
tx_dest = theta * v[0];
ty_dest = theta * v[1];
x_src = tx_dest;
y_src = ty_dest;
if (param.imageType == CudaRemapParam::ImageTypeRectlinear) // rectilinear image
{
//SetDesc(m_stack[i], rect_sphere_tp, &(m_mp.distance) ); i++; // Convert rectilinear to spherical
CalcType rho, theta, r;
r = sqrt(x_src * x_src + y_src * y_src);
theta = r / param.distance;
if (theta >= param.PI / 2.0)
rho = 1.6e16;
else if (theta == 0.0)
rho = 1.0;
else
rho = tan(theta) / theta;
tx_dest = rho * x_src;
ty_dest = rho * y_src;
x_src = tx_dest;
y_src = ty_dest;
}
//camera intrinsic parameters
//SetDesc( stack[i], resize, param.scale ); i++; // Scale image
tx_dest = x_src * param.scale[0];
ty_dest = y_src * param.scale[1];
x_src = tx_dest;
y_src = ty_dest;
CalcType rt, scale;
rt = (sqrt(x_src*x_src + y_src*y_src)) / param.rad[4];
if (rt < param.rad[5])
{
scale = ((param.rad[3] * rt + param.rad[2]) * rt +
param.rad[1]) * rt + param.rad[0];
}
else
scale = 1000.0;
tx_dest = x_src * scale;
ty_dest = y_src * scale;
x_src = tx_dest;
y_src = ty_dest;
//camera horizontal/vertical correction
if (param.vertical != 0.0)
{
//SetDesc(stack[i], vert, &(param.vertical)); i++;
tx_dest = x_src;
ty_dest = y_src + param.vertical;
x_src = tx_dest;
y_src = ty_dest;
}
if (param.horizontal != 0.0)
{
//SetDesc(stack[i], horiz, &(param.horizontal)); i++;
tx_dest = x_src + param.horizontal;
ty_dest = y_src;
x_src = tx_dest;
y_src = ty_dest;
}
if (param.shear[0] != 0 || param.shear[1] != 0)
{
//SetDesc( stack[i], shear, param.shear ); i++;
tx_dest = x_src + param.shear[0] * y_src;
ty_dest = y_src + param.shear[1] * x_src;
}
tx_dest += param.destTX - 0.5;
ty_dest += param.destTY - 0.5;
if (param.imageType == CudaRemapParam::ImageTypeDrumFishEye ||
param.imageType == CudaRemapParam::ImageTypeCircularFishEye)
{
float diffx = tx_dest - param.centx;
float diffy = ty_dest - param.centy;
if (tx_dest >= param.cropX && tx_dest < param.cropX + param.cropWidth &&
ty_dest >= param.cropY && ty_dest < param.cropY + param.cropHeight &&
diffx * diffx + diffy * diffy < param.sqrDist)
{
*srcx = tx_dest;
*srcy = ty_dest;
}
else
{
*srcx = -1.0F;
*srcy = -1.0F;
}
}
else
{
*srcx = tx_dest;
*srcy = ty_dest;
}
}
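// remapKernel: for each pixel of the destination equirectangular panorama, compute the matching source-image coordinate and write it to the x/y maps; pixels mapping outside the crop rectangle or fisheye circle are flagged with -1.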
__global__ void remapKernel(unsigned char* xMapData, int xMapStep,
unsigned char* yMapData, int yMapStep, int mapWidth, int mapHeight)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= mapWidth || y >= mapHeight)
return;
CalcType x_src = x, y_src = y;
x_src -= param.srcTX - 0.5;
y_src -= param.srcTY - 0.5;
CalcType tx_dest, ty_dest;
//rotate_erect: center normalization
tx_dest = x_src + param.rot[1];
while (tx_dest < -param.rot[0])
tx_dest += 2 * param.rot[0];
while (tx_dest > param.rot[0])
tx_dest -= 2 * param.rot[0];
ty_dest = y_src;
x_src = tx_dest;
y_src = ty_dest;
//sphere_tp_erect: convert spherical coordinates to world coordinates
CalcType phi, theta, r;
CalcType v[3];
phi = x_src / param.distance; //
theta = -y_src / param.distance + param.PI / 2; //
if (theta < 0)
{
theta = -theta;
phi += param.PI;
}
if (theta > param.PI)
{
theta = param.PI - (theta - param.PI);
phi += param.PI;
}
v[0] = sin(theta) * sin(phi);
v[1] = cos(theta);
v[2] = sin(theta) * cos(phi);
//camera extrinsic parameters
CalcType v0 = v[0];
CalcType v1 = v[1];
CalcType v2 = v[2];
for (int i = 0; i<3; i++)
{
v[i] = param.mt[0][i] * v0 + param.mt[1][i] * v1 + param.mt[2][i] * v2;
}
r = sqrt(v[0] * v[0] + v[1] * v[1]);
if (r == 0.0)
theta = 0.0;
else
theta = param.distance * atan2(r, v[2]) / r;
tx_dest = theta * v[0];
ty_dest = theta * v[1];
x_src = tx_dest;
y_src = ty_dest;
if (param.imageType == CudaRemapParam::ImageTypeRectlinear) // rectilinear image
{
//SetDesc(m_stack[i], rect_sphere_tp, &(m_mp.distance) ); i++; // Convert rectilinear to spherical
CalcType rho, theta, r;
r = sqrt(x_src * x_src + y_src * y_src);
theta = r / param.distance;
if (theta >= param.PI / 2.0)
rho = 1.6e16;
else if (theta == 0.0)
rho = 1.0;
else
rho = tan(theta) / theta;
tx_dest = rho * x_src;
ty_dest = rho * y_src;
x_src = tx_dest;
y_src = ty_dest;
}
//camera intrinsic parameters
//SetDesc( stack[i], resize, param.scale ); i++; // Scale image
tx_dest = x_src * param.scale[0];
ty_dest = y_src * param.scale[1];
x_src = tx_dest;
y_src = ty_dest;
CalcType rt, scale;
rt = (sqrt(x_src*x_src + y_src*y_src)) / param.rad[4];
if (rt < param.rad[5])
{
scale = ((param.rad[3] * rt + param.rad[2]) * rt +
param.rad[1]) * rt + param.rad[0];
}
else
scale = 1000.0;
tx_dest = x_src * scale;
ty_dest = y_src * scale;
x_src = tx_dest;
y_src = ty_dest;
//camera horizontal/vertical correction
if (param.vertical != 0.0)
{
//SetDesc(stack[i], vert, &(param.vertical)); i++;
tx_dest = x_src;
ty_dest = y_src + param.vertical;
x_src = tx_dest;
y_src = ty_dest;
}
if (param.horizontal != 0.0)
{
//SetDesc(stack[i], horiz, &(param.horizontal)); i++;
tx_dest = x_src + param.horizontal;
ty_dest = y_src;
x_src = tx_dest;
y_src = ty_dest;
}
if (param.shear[0] != 0 || param.shear[1] != 0)
{
//SetDesc( stack[i], shear, param.shear ); i++;
tx_dest = x_src + param.shear[0] * y_src;
ty_dest = y_src + param.shear[1] * x_src;
}
tx_dest += param.destTX - 0.5;
ty_dest += param.destTY - 0.5;
if (param.imageType == CudaRemapParam::ImageTypeDrumFishEye ||
param.imageType == CudaRemapParam::ImageTypeCircularFishEye)
{
float diffx = tx_dest - param.centx;
float diffy = ty_dest - param.centy;
if (tx_dest >= param.cropX && tx_dest < param.cropX + param.cropWidth &&
ty_dest >= param.cropY && ty_dest < param.cropY + param.cropHeight &&
diffx * diffx + diffy * diffy < param.sqrDist)
{
*((float*)(xMapData + y * xMapStep) + x) = tx_dest;
*((float*)(yMapData + y * yMapStep) + x) = ty_dest;
}
else
{
*((float*)(xMapData + y * xMapStep) + x) = -1.0F;
*((float*)(yMapData + y * yMapStep) + x) = -1.0F;
}
}
else
{
*((float*)(xMapData + y * xMapStep) + x) = tx_dest;
*((float*)(yMapData + y * yMapStep) + x) = ty_dest;
}
//int x = threadIdx.x + blockIdx.x * blockDim.x;
//int y = threadIdx.y + blockIdx.y * blockDim.y;
//if (x >= mapWidth || y >= mapHeight)
// return;
//dstToSrc((float*)(xMapData + y * xMapStep) + x, (float*)(yMapData + y * yMapStep) + x, x, y, mapWidth, mapHeight);
}
void cudaGenerateReprojectMap(const PhotoParam& photoParam_,
const cv::Size& srcSize, const cv::Size& dstSize, cv::cuda::GpuMat& xmap, cv::cuda::GpuMat& ymap)
{
CV_Assert(srcSize.width > 0 && srcSize.height > 0 &&
dstSize.width > 0 && dstSize.height > 0 && dstSize.width == 2 * dstSize.height);
int dstWidth = dstSize.width, dstHeight = dstSize.height;
int srcWidth = srcSize.width, srcHeight = srcSize.height;
bool fullImage = (photoParam_.imageType == PhotoParam::ImageTypeRectlinear) ||
(photoParam_.imageType == PhotoParam::ImageTypeFullFrameFishEye);
PhotoParam photoParam = photoParam_;
if (fullImage)
{
photoParam.cropX = 0;
photoParam.cropY = 0;
photoParam.cropWidth = srcWidth;
photoParam.cropHeight = srcHeight;
}
CalcType centx = 0, centy = 0, sqrDist = 0;
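// Determine the valid image circle: use the explicit circle parameters when given, otherwise derive the center and radius (half the larger crop side) from the crop rectangle.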
if (photoParam.circleR == 0)
{
centx = photoParam.cropX + photoParam.cropWidth / 2;
centy = photoParam.cropY + photoParam.cropHeight / 2;
sqrDist = photoParam.cropWidth > photoParam.cropHeight ?
photoParam.cropWidth * photoParam.cropWidth * 0.25 :
photoParam.cropHeight * photoParam.cropHeight * 0.25;
}
else
{
centx = photoParam.circleX;
centy = photoParam.circleY;
sqrDist = photoParam.circleR * photoParam.circleR;
}
Remap remap;
remap.init(photoParam, dstWidth, dstHeight, srcWidth, srcHeight);
CudaRemapParam cudaParam;
copyParam(remap, cudaParam,
photoParam.cropX, photoParam.cropY, photoParam.cropWidth, photoParam.cropHeight,
centx, centy, sqrDist, photoParam.imageType);
cudaSafeCall(hipMemcpyToSymbol(param, &cudaParam, sizeof(CudaRemapParam)));
xmap.create(dstHeight, dstWidth, CV_32FC1);
ymap.create(dstHeight, dstWidth, CV_32FC1);
dim3 block(16, 16);
dim3 grid((dstSize.width + block.x - 1) / block.x, (dstSize.height + block.y - 1) / block.y);
hipLaunchKernelGGL(( remapKernel), dim3(grid), dim3(block), 0, 0, xmap.data, xmap.step, ymap.data, ymap.step, dstWidth, dstHeight);
cudaSafeCall(hipGetLastError());
}
void cudaGenerateReprojectMaps(const std::vector<PhotoParam>& params,
const cv::Size& srcSize, const cv::Size& dstSize, std::vector<cv::cuda::GpuMat>& xmaps, std::vector<cv::cuda::GpuMat>& ymaps)
{
int num = params.size();
xmaps.resize(num);
ymaps.resize(num);
for (int i = 0; i < num; i++)
cudaGenerateReprojectMap(params[i], srcSize, dstSize, xmaps[i], ymaps[i]);
} | bd2cefeca2dc3469f315ef54f52337bd589e8431.cu | #include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/cuda_stream_accessor.hpp"
#include "opencv2/core/cuda/common.hpp"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "Warp/ZReproject.h"
typedef double CalcType;
struct CudaRemapParam
{
enum ImageType
{
ImageTypeRectlinear = 0,
ImageTypeFullFrameFishEye = 1,
ImageTypeDrumFishEye = 2,
ImageTypeCircularFishEye = 3
};
CalcType srcTX, srcTY;
CalcType destTX, destTY;
CalcType scale[2];
CalcType shear[2];
CalcType rot[2];
void *perspect[2];
CalcType rad[6];
CalcType mt[3][3];
CalcType distance;
CalcType horizontal;
CalcType vertical;
CalcType PI;
CalcType cropX;
CalcType cropY;
CalcType cropWidth;
CalcType cropHeight;
CalcType centx;
CalcType centy;
CalcType sqrDist;
int imageType;
};
void copyParam(const Remap& src, CudaRemapParam& dst, CalcType x, CalcType y,
CalcType width, CalcType height, CalcType centx, CalcType centy, CalcType sqrDist, int type)
{
dst.srcTX = src.srcTX;
dst.srcTY = src.srcTY;
dst.destTX = src.destTX;
dst.destTY = src.destTY;
dst.scale[0] = src.mp.scale[0];
dst.scale[1] = src.mp.scale[1];
dst.shear[0] = src.mp.shear[0];
dst.shear[1] = src.mp.shear[1];
dst.rot[0] = src.mp.rot[0];
dst.rot[1] = src.mp.rot[1];
dst.perspect[0] = src.mp.perspect[0];
dst.perspect[1] = src.mp.perspect[1];
for (int i = 0; i < 6; i++)
dst.rad[i] = src.mp.rad[i];
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
dst.mt[i][j] = src.mp.mt[i][j];
}
dst.distance = src.mp.distance;
dst.horizontal = src.mp.horizontal;
dst.vertical = src.mp.vertical;
dst.PI = 3.1415926535898;
dst.cropX = x;
dst.cropY = y;
dst.cropWidth = width;
dst.cropHeight = height;
dst.centx = centx;
dst.centy = centy;
dst.sqrDist = sqrDist;
dst.imageType = type;
}
__constant__ CudaRemapParam param;
__device__ void dstToSrc(float* srcx, float* srcy, int dstx, int dsty, int mapWidth, int mapHeight)
{
if (dstx >= mapWidth || dsty >= mapHeight)
return;
CalcType x_src = dstx, y_src = dsty;
x_src -= param.srcTX - 0.5;
y_src -= param.srcTY - 0.5;
CalcType tx_dest, ty_dest;
//rotate_erect: center normalization
tx_dest = x_src + param.rot[1];
while (tx_dest < -param.rot[0])
tx_dest += 2 * param.rot[0];
while (tx_dest > param.rot[0])
tx_dest -= 2 * param.rot[0];
ty_dest = y_src;
x_src = tx_dest;
y_src = ty_dest;
//sphere_tp_erect: convert spherical coordinates to world coordinates
CalcType phi, theta, r;
CalcType v[3];
phi = x_src / param.distance; //
theta = -y_src / param.distance + param.PI / 2; //
if (theta < 0)
{
theta = -theta;
phi += param.PI;
}
if (theta > param.PI)
{
theta = param.PI - (theta - param.PI);
phi += param.PI;
}
v[0] = sin(theta) * sin(phi);
v[1] = cos(theta);
v[2] = sin(theta) * cos(phi);
//camera extrinsic parameters
CalcType v0 = v[0];
CalcType v1 = v[1];
CalcType v2 = v[2];
for (int i = 0; i<3; i++)
{
v[i] = param.mt[0][i] * v0 + param.mt[1][i] * v1 + param.mt[2][i] * v2;
}
r = sqrt(v[0] * v[0] + v[1] * v[1]);
if (r == 0.0)
theta = 0.0;
else
theta = param.distance * atan2(r, v[2]) / r;
tx_dest = theta * v[0];
ty_dest = theta * v[1];
x_src = tx_dest;
y_src = ty_dest;
if (param.imageType == CudaRemapParam::ImageTypeRectlinear) // rectilinear image
{
//SetDesc(m_stack[i], rect_sphere_tp, &(m_mp.distance) ); i++; // Convert rectilinear to spherical
CalcType rho, theta, r;
r = sqrt(x_src * x_src + y_src * y_src);
theta = r / param.distance;
if (theta >= param.PI / 2.0)
rho = 1.6e16;
else if (theta == 0.0)
rho = 1.0;
else
rho = tan(theta) / theta;
tx_dest = rho * x_src;
ty_dest = rho * y_src;
x_src = tx_dest;
y_src = ty_dest;
}
//camera intrinsic parameters
//SetDesc( stack[i], resize, param.scale ); i++; // Scale image
tx_dest = x_src * param.scale[0];
ty_dest = y_src * param.scale[1];
x_src = tx_dest;
y_src = ty_dest;
CalcType rt, scale;
rt = (sqrt(x_src*x_src + y_src*y_src)) / param.rad[4];
if (rt < param.rad[5])
{
scale = ((param.rad[3] * rt + param.rad[2]) * rt +
param.rad[1]) * rt + param.rad[0];
}
else
scale = 1000.0;
tx_dest = x_src * scale;
ty_dest = y_src * scale;
x_src = tx_dest;
y_src = ty_dest;
//camera horizontal/vertical correction
if (param.vertical != 0.0)
{
//SetDesc(stack[i], vert, &(param.vertical)); i++;
tx_dest = x_src;
ty_dest = y_src + param.vertical;
x_src = tx_dest;
y_src = ty_dest;
}
if (param.horizontal != 0.0)
{
//SetDesc(stack[i], horiz, &(param.horizontal)); i++;
tx_dest = x_src + param.horizontal;
ty_dest = y_src;
x_src = tx_dest;
y_src = ty_dest;
}
if (param.shear[0] != 0 || param.shear[1] != 0)
{
//SetDesc( stack[i], shear, param.shear ); i++;
tx_dest = x_src + param.shear[0] * y_src;
ty_dest = y_src + param.shear[1] * x_src;
}
tx_dest += param.destTX - 0.5;
ty_dest += param.destTY - 0.5;
if (param.imageType == CudaRemapParam::ImageTypeDrumFishEye ||
param.imageType == CudaRemapParam::ImageTypeCircularFishEye)
{
float diffx = tx_dest - param.centx;
float diffy = ty_dest - param.centy;
if (tx_dest >= param.cropX && tx_dest < param.cropX + param.cropWidth &&
ty_dest >= param.cropY && ty_dest < param.cropY + param.cropHeight &&
diffx * diffx + diffy * diffy < param.sqrDist)
{
*srcx = tx_dest;
*srcy = ty_dest;
}
else
{
*srcx = -1.0F;
*srcy = -1.0F;
}
}
else
{
*srcx = tx_dest;
*srcy = ty_dest;
}
}
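// remapKernel: for each pixel of the destination equirectangular panorama, compute the matching source-image coordinate and write it to the x/y maps; pixels mapping outside the crop rectangle or fisheye circle are flagged with -1.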
__global__ void remapKernel(unsigned char* xMapData, int xMapStep,
unsigned char* yMapData, int yMapStep, int mapWidth, int mapHeight)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= mapWidth || y >= mapHeight)
return;
CalcType x_src = x, y_src = y;
x_src -= param.srcTX - 0.5;
y_src -= param.srcTY - 0.5;
CalcType tx_dest, ty_dest;
//rotate_erect: center normalization
tx_dest = x_src + param.rot[1];
while (tx_dest < -param.rot[0])
tx_dest += 2 * param.rot[0];
while (tx_dest > param.rot[0])
tx_dest -= 2 * param.rot[0];
ty_dest = y_src;
x_src = tx_dest;
y_src = ty_dest;
//sphere_tp_erect: convert spherical coordinates to world coordinates
CalcType phi, theta, r;
CalcType v[3];
phi = x_src / param.distance; //
theta = -y_src / param.distance + param.PI / 2; //
if (theta < 0)
{
theta = -theta;
phi += param.PI;
}
if (theta > param.PI)
{
theta = param.PI - (theta - param.PI);
phi += param.PI;
}
v[0] = sin(theta) * sin(phi);
v[1] = cos(theta);
v[2] = sin(theta) * cos(phi);
//camera extrinsic parameters
CalcType v0 = v[0];
CalcType v1 = v[1];
CalcType v2 = v[2];
for (int i = 0; i<3; i++)
{
v[i] = param.mt[0][i] * v0 + param.mt[1][i] * v1 + param.mt[2][i] * v2;
}
r = sqrt(v[0] * v[0] + v[1] * v[1]);
if (r == 0.0)
theta = 0.0;
else
theta = param.distance * atan2(r, v[2]) / r;
tx_dest = theta * v[0];
ty_dest = theta * v[1];
x_src = tx_dest;
y_src = ty_dest;
if (param.imageType == CudaRemapParam::ImageTypeRectlinear) // rectilinear image
{
//SetDesc(m_stack[i], rect_sphere_tp, &(m_mp.distance) ); i++; // Convert rectilinear to spherical
CalcType rho, theta, r;
r = sqrt(x_src * x_src + y_src * y_src);
theta = r / param.distance;
if (theta >= param.PI / 2.0)
rho = 1.6e16;
else if (theta == 0.0)
rho = 1.0;
else
rho = tan(theta) / theta;
tx_dest = rho * x_src;
ty_dest = rho * y_src;
x_src = tx_dest;
y_src = ty_dest;
}
//camera intrinsic parameters
//SetDesc( stack[i], resize, param.scale ); i++; // Scale image
tx_dest = x_src * param.scale[0];
ty_dest = y_src * param.scale[1];
x_src = tx_dest;
y_src = ty_dest;
CalcType rt, scale;
rt = (sqrt(x_src*x_src + y_src*y_src)) / param.rad[4];
if (rt < param.rad[5])
{
scale = ((param.rad[3] * rt + param.rad[2]) * rt +
param.rad[1]) * rt + param.rad[0];
}
else
scale = 1000.0;
tx_dest = x_src * scale;
ty_dest = y_src * scale;
x_src = tx_dest;
y_src = ty_dest;
//camera horizontal/vertical correction
if (param.vertical != 0.0)
{
//SetDesc(stack[i], vert, &(param.vertical)); i++;
tx_dest = x_src;
ty_dest = y_src + param.vertical;
x_src = tx_dest;
y_src = ty_dest;
}
if (param.horizontal != 0.0)
{
//SetDesc(stack[i], horiz, &(param.horizontal)); i++;
tx_dest = x_src + param.horizontal;
ty_dest = y_src;
x_src = tx_dest;
y_src = ty_dest;
}
if (param.shear[0] != 0 || param.shear[1] != 0)
{
//SetDesc( stack[i], shear, param.shear ); i++;
tx_dest = x_src + param.shear[0] * y_src;
ty_dest = y_src + param.shear[1] * x_src;
}
tx_dest += param.destTX - 0.5;
ty_dest += param.destTY - 0.5;
if (param.imageType == CudaRemapParam::ImageTypeDrumFishEye ||
param.imageType == CudaRemapParam::ImageTypeCircularFishEye)
{
float diffx = tx_dest - param.centx;
float diffy = ty_dest - param.centy;
if (tx_dest >= param.cropX && tx_dest < param.cropX + param.cropWidth &&
ty_dest >= param.cropY && ty_dest < param.cropY + param.cropHeight &&
diffx * diffx + diffy * diffy < param.sqrDist)
{
*((float*)(xMapData + y * xMapStep) + x) = tx_dest;
*((float*)(yMapData + y * yMapStep) + x) = ty_dest;
}
else
{
*((float*)(xMapData + y * xMapStep) + x) = -1.0F;
*((float*)(yMapData + y * yMapStep) + x) = -1.0F;
}
}
else
{
*((float*)(xMapData + y * xMapStep) + x) = tx_dest;
*((float*)(yMapData + y * yMapStep) + x) = ty_dest;
}
//int x = threadIdx.x + blockIdx.x * blockDim.x;
//int y = threadIdx.y + blockIdx.y * blockDim.y;
//if (x >= mapWidth || y >= mapHeight)
// return;
//dstToSrc((float*)(xMapData + y * xMapStep) + x, (float*)(yMapData + y * yMapStep) + x, x, y, mapWidth, mapHeight);
}
void cudaGenerateReprojectMap(const PhotoParam& photoParam_,
const cv::Size& srcSize, const cv::Size& dstSize, cv::cuda::GpuMat& xmap, cv::cuda::GpuMat& ymap)
{
CV_Assert(srcSize.width > 0 && srcSize.height > 0 &&
dstSize.width > 0 && dstSize.height > 0 && dstSize.width == 2 * dstSize.height);
int dstWidth = dstSize.width, dstHeight = dstSize.height;
int srcWidth = srcSize.width, srcHeight = srcSize.height;
bool fullImage = (photoParam_.imageType == PhotoParam::ImageTypeRectlinear) ||
(photoParam_.imageType == PhotoParam::ImageTypeFullFrameFishEye);
PhotoParam photoParam = photoParam_;
if (fullImage)
{
photoParam.cropX = 0;
photoParam.cropY = 0;
photoParam.cropWidth = srcWidth;
photoParam.cropHeight = srcHeight;
}
CalcType centx = 0, centy = 0, sqrDist = 0;
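// Determine the valid image circle: use the explicit circle parameters when given, otherwise derive the center and radius (half the larger crop side) from the crop rectangle.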
if (photoParam.circleR == 0)
{
centx = photoParam.cropX + photoParam.cropWidth / 2;
centy = photoParam.cropY + photoParam.cropHeight / 2;
sqrDist = photoParam.cropWidth > photoParam.cropHeight ?
photoParam.cropWidth * photoParam.cropWidth * 0.25 :
photoParam.cropHeight * photoParam.cropHeight * 0.25;
}
else
{
centx = photoParam.circleX;
centy = photoParam.circleY;
sqrDist = photoParam.circleR * photoParam.circleR;
}
Remap remap;
remap.init(photoParam, dstWidth, dstHeight, srcWidth, srcHeight);
CudaRemapParam cudaParam;
copyParam(remap, cudaParam,
photoParam.cropX, photoParam.cropY, photoParam.cropWidth, photoParam.cropHeight,
centx, centy, sqrDist, photoParam.imageType);
cudaSafeCall(cudaMemcpyToSymbol(param, &cudaParam, sizeof(CudaRemapParam)));
xmap.create(dstHeight, dstWidth, CV_32FC1);
ymap.create(dstHeight, dstWidth, CV_32FC1);
dim3 block(16, 16);
dim3 grid((dstSize.width + block.x - 1) / block.x, (dstSize.height + block.y - 1) / block.y);
remapKernel<<<grid, block>>>(xmap.data, xmap.step, ymap.data, ymap.step, dstWidth, dstHeight);
cudaSafeCall(cudaGetLastError());
}
void cudaGenerateReprojectMaps(const std::vector<PhotoParam>& params,
const cv::Size& srcSize, const cv::Size& dstSize, std::vector<cv::cuda::GpuMat>& xmaps, std::vector<cv::cuda::GpuMat>& ymaps)
{
int num = params.size();
xmaps.resize(num);
ymaps.resize(num);
for (int i = 0; i < num; i++)
cudaGenerateReprojectMap(params[i], srcSize, dstSize, xmaps[i], ymaps[i]);
} |
00000782255dd0514cac8537a9537cc25b6e6bd9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipblas.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <stdio.h>
#include <iostream>
#include "Utilities.cuh"
#include "TimingGPU.cuh"
using namespace thrust::placeholders;
// --- Required for approach #2
__device__ float *vals;
/**************************************************************/
/* CONVERT LINEAR INDEX TO ROW INDEX - NEEDED FOR APPROACH #1 */
/**************************************************************/
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T> {
T Ncols; // --- Number of columns
__host__ __device__ linear_index_to_row_index(T Ncols) : Ncols(Ncols) {}
__host__ __device__ T operator()(T i) { return i / Ncols; }
};
/******************************************/
/* COL_REDUCTION - NEEDED FOR APPROACH #2 */
/******************************************/
struct col_reduction {
const int Nrows; // --- Number of rows
const int Ncols; // --- Number of cols
col_reduction(int _Nrows, int _Ncols) : Nrows(_Nrows), Ncols(_Ncols) {}
__device__ float operator()(float& x, int& y ) {
float temp = 0.f;
for (int i = 0; i<Nrows; i++) {
temp += vals[y + (i*Ncols)];
}
return temp;
}
};
/**************************/
/* NEEDED FOR APPROACH #3 */
/**************************/
template<typename T>
struct MulC: public thrust::unary_function<T, T>
{
T C;
__host__ __device__ MulC(T c) : C(c) { }
__host__ __device__ T operator()(T x) { return x * C; }
};
/********/
/* MAIN */
/********/
int main()
{
const int Nrows = 5; // --- Number of rows
const int Ncols = 8; // --- Number of columns
// --- Random uniform integer distribution between 10 and 99
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> dist(10, 99);
// --- Matrix allocation and initialization
thrust::device_vector<float> d_matrix(Nrows * Ncols);
for (size_t i = 0; i < d_matrix.size(); i++) d_matrix[i] = (float)dist(rng);
TimingGPU timerGPU;
/***************/
/* APPROACH #1 */
/***************/
timerGPU.StartCounter();
// --- Allocate space for column sums and indices
thrust::device_vector<float> d_col_sums(Ncols);
thrust::device_vector<int> d_col_indices(Ncols);
// --- Compute column sums by summing values with equal column keys (the permutation iterator walks the row-major matrix column by column)
thrust::reduce_by_key(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(Nrows)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(Nrows)) + (Nrows*Ncols),
thrust::make_permutation_iterator(
d_matrix.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)),
d_col_indices.begin(),
d_col_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<float>());
//thrust::reduce_by_key(
// thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Nrows)),
// thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Nrows)) + (Nrows*Ncols),
// thrust::make_permutation_iterator(
// d_matrix.begin(),
// thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)),
// thrust::make_discard_iterator(),
// d_col_sums.begin());
printf("Timing for approach #1 = %f\n", timerGPU.GetCounter());
// --- Print result
for(int j = 0; j < Ncols; j++) {
std::cout << "[ ";
for(int i = 0; i < Nrows; i++)
std::cout << d_matrix[i * Ncols + j] << " ";
std::cout << "] = " << d_col_sums[j] << "\n";
}
/***************/
/* APPROACH #2 */
/***************/
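// --- Approach #2: publish the matrix pointer through the __device__ global "vals"; one thrust::transform invocation per column walks down that column and accumulates its sum.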
timerGPU.StartCounter();
thrust::device_vector<float> d_col_sums_2(Ncols, 0);
float *s_vals = thrust::raw_pointer_cast(&d_matrix[0]);
gpuErrchk(hipMemcpyToSymbol(vals, &s_vals, sizeof(float *)));
thrust::transform(d_col_sums_2.begin(), d_col_sums_2.end(), thrust::counting_iterator<int>(0), d_col_sums_2.begin(), col_reduction(Nrows, Ncols));
printf("Timing for approach #2 = %f\n", timerGPU.GetCounter());
for(int j = 0; j < Ncols; j++) {
std::cout << "[ ";
for(int i = 0; i < Nrows; i++)
std::cout << d_matrix[i * Ncols + j] << " ";
std::cout << "] = " << d_col_sums_2[j] << "\n";
}
/***************/
/* APPROACH #3 */
/***************/
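// --- Approach #3: inclusive_scan_by_key over the column-by-column traversal yields running sums per column; the strided copy below gathers the last element of each Nrows-long segment as that column's total.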
timerGPU.StartCounter();
thrust::device_vector<float> d_col_sums_3(Ncols, 0);
thrust::device_vector<float> d_temp(Nrows * Ncols);
thrust::inclusive_scan_by_key(
thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Nrows)),
thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Nrows)) + (Nrows*Ncols),
thrust::make_permutation_iterator(
d_matrix.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)),
d_temp.begin());
thrust::copy(
thrust::make_permutation_iterator(
d_temp.begin() + Nrows - 1,
thrust::make_transform_iterator(thrust::make_counting_iterator(0), MulC<int>(Nrows))),
thrust::make_permutation_iterator(
d_temp.begin() + Nrows - 1,
thrust::make_transform_iterator(thrust::make_counting_iterator(0), MulC<int>(Nrows))) + Ncols,
d_col_sums_3.begin());
printf("Timing for approach #3 = %f\n", timerGPU.GetCounter());
for(int j = 0; j < Ncols; j++) {
std::cout << "[ ";
for(int i = 0; i < Nrows; i++)
std::cout << d_matrix[i * Ncols + j] << " ";
std::cout << "] = " << d_col_sums_3[j] << "\n";
}
/***************/
/* APPROACH #4 */
/***************/
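// --- Approach #4: column sums as a single GEMV, y = A * ones. Passing the row-major Nrows x Ncols matrix untransposed with lda = Ncols makes hipBLAS read it as a column-major Ncols x Nrows matrix, so multiplying by a ones vector yields the column sums.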
hipblasHandle_t handle;
timerGPU.StartCounter();
cublasSafeCall(hipblasCreate(&handle));
thrust::device_vector<float> d_col_sums_4(Ncols);
thrust::device_vector<float> d_ones(Nrows, 1.f);
float alpha = 1.f;
float beta = 0.f;
cublasSafeCall(hipblasSgemv(handle, HIPBLAS_OP_N, Ncols, Nrows, &alpha, thrust::raw_pointer_cast(d_matrix.data()), Ncols,
thrust::raw_pointer_cast(d_ones.data()), 1, &beta, thrust::raw_pointer_cast(d_col_sums_4.data()), 1));
printf("Timing for approach #4 = %f\n", timerGPU.GetCounter());
for(int j = 0; j < Ncols; j++) {
std::cout << "[ ";
for(int i = 0; i < Nrows; i++)
std::cout << d_matrix[i * Ncols + j] << " ";
std::cout << "] = " << d_col_sums_4[j] << "\n";
}
return 0;
}
| 00000782255dd0514cac8537a9537cc25b6e6bd9.cu | #include <cublas_v2.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <stdio.h>
#include <iostream>
#include "Utilities.cuh"
#include "TimingGPU.cuh"
using namespace thrust::placeholders;
// --- Required for approach #2
__device__ float *vals;
/**************************************************************/
/* CONVERT LINEAR INDEX TO ROW INDEX - NEEDED FOR APPROACH #1 */
/**************************************************************/
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T> {
T Ncols; // --- Number of columns
__host__ __device__ linear_index_to_row_index(T Ncols) : Ncols(Ncols) {}
__host__ __device__ T operator()(T i) { return i / Ncols; }
};
/******************************************/
/* COL_REDUCTION - NEEDED FOR APPROACH #2 */
/******************************************/
struct col_reduction {
const int Nrows; // --- Number of rows
const int Ncols; // --- Number of cols
col_reduction(int _Nrows, int _Ncols) : Nrows(_Nrows), Ncols(_Ncols) {}
__device__ float operator()(float& x, int& y ) {
float temp = 0.f;
for (int i = 0; i<Nrows; i++) {
temp += vals[y + (i*Ncols)];
}
return temp;
}
};
/**************************/
/* NEEDED FOR APPROACH #3 */
/**************************/
template<typename T>
struct MulC: public thrust::unary_function<T, T>
{
T C;
__host__ __device__ MulC(T c) : C(c) { }
__host__ __device__ T operator()(T x) { return x * C; }
};
/********/
/* MAIN */
/********/
int main()
{
const int Nrows = 5; // --- Number of rows
const int Ncols = 8; // --- Number of columns
// --- Random uniform integer distribution between 10 and 99
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> dist(10, 99);
// --- Matrix allocation and initialization
thrust::device_vector<float> d_matrix(Nrows * Ncols);
for (size_t i = 0; i < d_matrix.size(); i++) d_matrix[i] = (float)dist(rng);
TimingGPU timerGPU;
/***************/
/* APPROACH #1 */
/***************/
timerGPU.StartCounter();
// --- Allocate space for column sums and indices
thrust::device_vector<float> d_col_sums(Ncols);
thrust::device_vector<int> d_col_indices(Ncols);
// --- Compute column sums by summing values with equal column keys (the permutation iterator walks the row-major matrix column by column)
thrust::reduce_by_key(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(Nrows)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(Nrows)) + (Nrows*Ncols),
thrust::make_permutation_iterator(
d_matrix.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)),
d_col_indices.begin(),
d_col_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<float>());
//thrust::reduce_by_key(
// thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Nrows)),
// thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Nrows)) + (Nrows*Ncols),
// thrust::make_permutation_iterator(
// d_matrix.begin(),
// thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)),
// thrust::make_discard_iterator(),
// d_col_sums.begin());
printf("Timing for approach #1 = %f\n", timerGPU.GetCounter());
// --- Print result
for(int j = 0; j < Ncols; j++) {
std::cout << "[ ";
for(int i = 0; i < Nrows; i++)
std::cout << d_matrix[i * Ncols + j] << " ";
std::cout << "] = " << d_col_sums[j] << "\n";
}
/***************/
/* APPROACH #2 */
/***************/
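// --- Approach #2: publish the matrix pointer through the __device__ global "vals"; one thrust::transform invocation per column walks down that column and accumulates its sum.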
timerGPU.StartCounter();
thrust::device_vector<float> d_col_sums_2(Ncols, 0);
float *s_vals = thrust::raw_pointer_cast(&d_matrix[0]);
gpuErrchk(cudaMemcpyToSymbol(vals, &s_vals, sizeof(float *)));
thrust::transform(d_col_sums_2.begin(), d_col_sums_2.end(), thrust::counting_iterator<int>(0), d_col_sums_2.begin(), col_reduction(Nrows, Ncols));
printf("Timing for approach #2 = %f\n", timerGPU.GetCounter());
for(int j = 0; j < Ncols; j++) {
std::cout << "[ ";
for(int i = 0; i < Nrows; i++)
std::cout << d_matrix[i * Ncols + j] << " ";
std::cout << "] = " << d_col_sums_2[j] << "\n";
}
/***************/
/* APPROACH #3 */
/***************/
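// --- Approach #3: inclusive_scan_by_key over the column-by-column traversal yields running sums per column; the strided copy below gathers the last element of each Nrows-long segment as that column's total.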
timerGPU.StartCounter();
thrust::device_vector<float> d_col_sums_3(Ncols, 0);
thrust::device_vector<float> d_temp(Nrows * Ncols);
thrust::inclusive_scan_by_key(
thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Nrows)),
thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Nrows)) + (Nrows*Ncols),
thrust::make_permutation_iterator(
d_matrix.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)),
d_temp.begin());
thrust::copy(
thrust::make_permutation_iterator(
d_temp.begin() + Nrows - 1,
thrust::make_transform_iterator(thrust::make_counting_iterator(0), MulC<int>(Nrows))),
thrust::make_permutation_iterator(
d_temp.begin() + Nrows - 1,
thrust::make_transform_iterator(thrust::make_counting_iterator(0), MulC<int>(Nrows))) + Ncols,
d_col_sums_3.begin());
printf("Timing for approach #3 = %f\n", timerGPU.GetCounter());
for(int j = 0; j < Ncols; j++) {
std::cout << "[ ";
for(int i = 0; i < Nrows; i++)
std::cout << d_matrix[i * Ncols + j] << " ";
std::cout << "] = " << d_col_sums_3[j] << "\n";
}
/***************/
/* APPROACH #4 */
/***************/
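// --- Approach #4: column sums as a single GEMV, y = A * ones. Passing the row-major Nrows x Ncols matrix untransposed with lda = Ncols makes cuBLAS read it as a column-major Ncols x Nrows matrix, so multiplying by a ones vector yields the column sums.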
cublasHandle_t handle;
timerGPU.StartCounter();
cublasSafeCall(cublasCreate(&handle));
thrust::device_vector<float> d_col_sums_4(Ncols);
thrust::device_vector<float> d_ones(Nrows, 1.f);
float alpha = 1.f;
float beta = 0.f;
cublasSafeCall(cublasSgemv(handle, CUBLAS_OP_N, Ncols, Nrows, &alpha, thrust::raw_pointer_cast(d_matrix.data()), Ncols,
thrust::raw_pointer_cast(d_ones.data()), 1, &beta, thrust::raw_pointer_cast(d_col_sums_4.data()), 1));
printf("Timing for approach #4 = %f\n", timerGPU.GetCounter());
for(int j = 0; j < Ncols; j++) {
std::cout << "[ ";
for(int i = 0; i < Nrows; i++)
std::cout << d_matrix[i * Ncols + j] << " ";
std::cout << "] = " << d_col_sums_4[j] << "\n";
}
return 0;
}
|
a6aaaf742eec6eb8c3c7715c9e3ee2642fbcadab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BLOCK_SIZE 16
#define MAX(a,b) ( a > b ? a : b )
#define MIN(a,b) ( a <= b ? a : b )
#define SIGN(x) ( x >= 0.0 ? 1.0 : -1.0 )
#define ABS(x) ( (x) > 0.0 ? x : -(x) )
#define SQR(x) (x)*(x)
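// The kernels below implement one iteration of what appears to be an augmented-Lagrangian
// continuous max-flow solver: krnl_1 forms the update quantity for the spatial flow
// (pfbx, pfby); krnl_2/krnl_3 take a gradient step on its x/y components; krnl_4 computes
// a pointwise projection factor from the flow magnitude and the penalty bound, which
// krnl_5/krnl_6 apply to the staggered flow components; krnl_7 recomputes the flow
// divergence; krnl_8/krnl_9 update the source/sink flows (pfps, pfpt) capped by the
// capacities (pfCs, pfCt); and krnl_10 updates the multiplier pfu and writes the
// per-pixel residual FPS used for the convergence test.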
static __global__ void krnl_1(float *pfpt, float *pfps, float *pfu,
float *pfgk, float *pfdv, float cc, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
pfgk[index] = pfdv[index] - (pfps[index] - pfpt[index] + pfu[index]/cc);
}
}
static __global__ void krnl_2(float *pfbx, float *pfgk, float steps,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<iNy && idy<(iNx-1) )
{
int index = idx + (idy+1)*iNy;
pfbx[index] = steps*(pfgk[index] - pfgk[index-iNy]) + pfbx[index];
}
}
static __global__ void krnl_3(float *pfby, float *pfgk, float steps,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<(iNy-1) && idy<iNx )
{
int index = idx + idy*iNy + 1;
pfby[index] = steps*(pfgk[index] - pfgk[index-1]) + pfby[index];
}
}
static __global__ void krnl_4(float *pfbx, float *pfby, float *pfgk, float *pfpenalty,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
float fpt;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
fpt = sqrt((SQR(pfbx[index]) + SQR(pfbx[index+iNy])
+ SQR(pfby[index]) + SQR(pfby[index+1]))*0.5);
if (fpt > pfpenalty[index])
fpt = fpt / pfpenalty[index];
else
fpt = 1;
pfgk[index] = 1/fpt;
}
}
static __global__ void krnl_5(float *pfbx, float *pfgk, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<iNy && idy<(iNx-1) )
{
int index = idx + (idy+1)*iNy;
pfbx[index] = (pfgk[index] + pfgk[index-iNy])*0.5*pfbx[index];
}
}
static __global__ void krnl_6(float *pfby, float *pfgk,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<(iNy-1) && idy<iNx )
{
int index = idx + idy*iNy+1;
pfby[index] = 0.5*(pfgk[index] + pfgk[index-1])*pfby[index];
}
}
static __global__ void krnl_7(float *pfbx, float *pfby, float *pfdv,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
pfdv[index] = pfbx[index+iNy] - pfbx[index] + pfby[index+1] - pfby[index];
}
}
static __global__ void krnl_8(float *pfps, float *pfpt, float *pfu, float *pfdv,
float *pfCs, float cc, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
float fpt;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
fpt = pfpt[index] - pfu[index]/cc + pfdv[index] + 1/cc;
fpt = MIN(fpt, pfCs[index]);
pfps[index] = fpt;
}
}
static __global__ void krnl_9(float *pfps, float *pfpt, float *pfu, float *pfdv,
float *pfCt, float cc, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
float fpt;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
fpt = pfps[index] + pfu[index]/cc - pfdv[index];
fpt = MIN(fpt, pfCt[index]);
pfpt[index] = fpt;
}
}
static __global__ void krnl_10(float *pfpt, float *pfdv, float *pfps, float *pfu,
float *FPS, float cc, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
float fpt;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
fpt = cc * (pfpt[index] + pfdv[index] - pfps[index]);
FPS[index] = ABS(fpt);
pfu[index] -= fpt;
}
} | a6aaaf742eec6eb8c3c7715c9e3ee2642fbcadab.cu | #define BLOCK_SIZE 16
#define MAX(a,b) ( a > b ? a : b )
#define MIN(a,b) ( a <= b ? a : b )
#define SIGN(x) ( x >= 0.0 ? 1.0 : -1.0 )
#define ABS(x) ( (x) > 0.0 ? (x) : -(x) )
#define SQR(x) ((x)*(x))
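// Editorial note (best guess, not stated in the source): krnl_1..krnl_10 look
// like one iteration of an augmented-Lagrangian / continuous max-flow update:
// gradient steps on the spatial flows (krnl_2/3), projection onto the pointwise
// capacity constraint |p| <= penalty (krnl_4..6), a divergence update (krnl_7),
// source/sink flow updates (krnl_8/9), and a multiplier update whose residual
// is written to FPS (krnl_10).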
static __global__ void krnl_1(float *pfpt, float *pfps, float *pfu,
float *pfgk, float *pfdv, float cc, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
pfgk[index] = pfdv[index] - (pfps[index] - pfpt[index] + pfu[index]/cc);
}
}
static __global__ void krnl_2(float *pfbx, float *pfgk, float steps,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<iNy && idy<(iNx-1) )
{
int index = idx + (idy+1)*iNy;
pfbx[index] = steps*(pfgk[index] - pfgk[index-iNy]) + pfbx[index];
}
}
static __global__ void krnl_3(float *pfby, float *pfgk, float steps,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<(iNy-1) && idy<iNx )
{
int index = idx + idy*iNy + 1;
pfby[index] = steps*(pfgk[index] - pfgk[index-1]) + pfby[index];
}
}
static __global__ void krnl_4(float *pfbx, float *pfby, float *pfgk, float *pfpenalty,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
float fpt;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
fpt = sqrt((SQR(pfbx[index]) + SQR(pfbx[index+iNy])
+ SQR(pfby[index]) + SQR(pfby[index+1]))*0.5);
if (fpt > pfpenalty[index])
fpt = fpt / pfpenalty[index];
else
fpt = 1;
pfgk[index] = 1/fpt;
}
}
static __global__ void krnl_5(float *pfbx, float *pfgk, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<iNy && idy<(iNx-1) )
{
int index = idx + (idy+1)*iNy;
pfbx[index] = (pfgk[index] + pfgk[index-iNy])*0.5*pfbx[index];
}
}
static __global__ void krnl_6(float *pfby, float *pfgk,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<(iNy-1) && idy<iNx )
{
int index = idx + idy*iNy+1;
pfby[index] = 0.5*(pfgk[index] + pfgk[index-1])*pfby[index];
}
}
static __global__ void krnl_7(float *pfbx, float *pfby, float *pfdv,
int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
pfdv[index] = pfbx[index+iNy] - pfbx[index] + pfby[index+1] - pfby[index];
}
}
static __global__ void krnl_8(float *pfps, float *pfpt, float *pfu, float *pfdv,
float *pfCs, float cc, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
float fpt;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
fpt = pfpt[index] - pfu[index]/cc + pfdv[index] + 1/cc;
fpt = MIN(fpt, pfCs[index]);
pfps[index] = fpt;
}
}
static __global__ void krnl_9(float *pfps, float *pfpt, float *pfu, float *pfdv,
float *pfCt, float cc, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
float fpt;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
fpt = pfps[index] + pfu[index]/cc - pfdv[index];
fpt = MIN(fpt, pfCt[index]);
pfpt[index] = fpt;
}
}
static __global__ void krnl_10(float *pfpt, float *pfdv, float *pfps, float *pfu,
float *FPS, float cc, int iNx, int iNy)
{
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdx.y,blockDim.y)+threadIdx.y;
float fpt;
if( idx<iNy && idy<iNx )
{
int index = idx + idy*iNy;
fpt = cc * (pfpt[index] + pfdv[index] - pfps[index]);
FPS[index] = ABS(fpt);
pfu[index] -= fpt;
}
} |
fbadbc6e54c1f7ea76c81a3968e6d218bdb584a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
extern "C" {
texture<unsigned char, 2> uchar_tex;
texture<uchar2, 2> uchar2_tex;
texture<unsigned short, 2> ushort_tex;
texture<ushort2, 2> ushort2_tex;
}
__global__ void Thumbnail_ushort(int *histogram, int src_width, int src_height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < src_height && x < src_width)
{
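        // round the 16-bit sample down to 8 bits (add half an LSB before shifting)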
unsigned short pixel = (tex2D(ushort_tex, x, y) + 128) >> 8;
atomicAdd(&histogram[pixel], 1);
}
} | fbadbc6e54c1f7ea76c81a3968e6d218bdb584a8.cu | #include "includes.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
extern "C" {
texture<unsigned char, 2> uchar_tex;
texture<uchar2, 2> uchar2_tex;
texture<unsigned short, 2> ushort_tex;
texture<ushort2, 2> ushort2_tex;
}
__global__ void Thumbnail_ushort(int *histogram, int src_width, int src_height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < src_height && x < src_width)
{
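        // round the 16-bit sample down to 8 bits (add half an LSB before shifting)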
unsigned short pixel = (tex2D(ushort_tex, x, y) + 128) >> 8;
atomicAdd(&histogram[pixel], 1);
}
} |
e0c4b21f8aea6af485b6508dccdc6142bd12d8a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void printNumber(int number)
{
printf("%d\n", number);
}
int main()
{
for (int i = 0; i < 5; ++i)
{
hipStream_t stream;
hipStreamCreate(&stream);
hipLaunchKernelGGL(( printNumber), dim3(1), dim3(1), 0, stream, i);
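        // Destroying the stream immediately is safe: work already queued on it
        // still runs to completion before the stream's resources are released.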
hipStreamDestroy(stream);
}
hipDeviceSynchronize();
}
| e0c4b21f8aea6af485b6508dccdc6142bd12d8a1.cu | #include <stdio.h>
__global__ void printNumber(int number)
{
printf("%d\n", number);
}
int main()
{
for (int i = 0; i < 5; ++i)
{
cudaStream_t stream;
cudaStreamCreate(&stream);
printNumber<<<1, 1, 0, stream>>>(i);
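        // Destroying the stream immediately is safe: work already queued on it
        // still runs to completion before the stream's resources are released.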
cudaStreamDestroy(stream);
}
cudaDeviceSynchronize();
}
|
51e6901587706cdd841d7d9fe54cf4a2c2fb33f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix multiplication: P = M * N.
* Device code.
Author: Naga Kandasamy
Date: 2/16/2017
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#define TILE_SIZE 32
#include "matrixmul.h"
__global__ void
MatrixMulKernel(float* P, const float* M, const float* N, int matrix_size)
{
// Thread index
int threadX = threadIdx.x;
int threadY = threadIdx.y;
// Block index
int blockX = blockIdx.y;
//int blockY = blockIdx.y;
// Find position in Matrix
//int column_number = blockDim.x * blockX + threadX;
int row_number = ((TILE_SIZE)*(TILE_SIZE) * blockX) + threadY*(TILE_SIZE) + threadX;
double P_temp = 0;
for (int k = 0; k < matrix_size; k++) {
double M_element = M[matrix_size * row_number + k]; // Scan through row elements
//double N_element = N[matrix_size * k + column_number];
double N_element = N[k];
P_temp += M_element * N_element;
}
// Write result to P
//P[row_number * matrix_size + column_number] = (float)P_temp;
P[row_number] = (float)P_temp;
}
#endif
| 51e6901587706cdd841d7d9fe54cf4a2c2fb33f7.cu | /* Matrix multiplication: P = M * N.
* Device code.
Author: Naga Kandasamy
Date: 2/16/2017
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#define TILE_SIZE 32
#include "matrixmul.h"
__global__ void
MatrixMulKernel(float* P, const float* M, const float* N, int matrix_size)
{
// Thread index
int threadX = threadIdx.x;
int threadY = threadIdx.y;
// Block index
int blockX = blockIdx.y;
//int blockY = blockIdx.y;
// Find position in Matrix
//int column_number = blockDim.x * blockX + threadX;
int row_number = ((TILE_SIZE)*(TILE_SIZE) * blockX) + threadY*(TILE_SIZE) + threadX;
double P_temp = 0;
for (int k = 0; k < matrix_size; k++) {
double M_element = M[matrix_size * row_number + k]; // Scan through row elements
//double N_element = N[matrix_size * k + column_number];
double N_element = N[k];
P_temp += M_element * N_element;
}
// Write result to P
//P[row_number * matrix_size + column_number] = (float)P_temp;
P[row_number] = (float)P_temp;
}
#endif
|
unsorted_segment_min.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/unsorted_segment_min.cuh"
#include <limits>
#include "include/hip/hip_fp16.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
template <typename T>
__device__ __forceinline__ void max_val_init(T *init_val) {
*init_val = std::numeric_limits<T>::max();
}
// Handle fp16 differently for assignment
template <>
__device__ __forceinline__ void max_val_init(half *init_val) {
*init_val = __int2half_rd(65504); // Max value for Half
}
template <typename T, typename S>
__global__ void UnsortedSegmentMinCal(size_t input_dim0, size_t input_dim1, size_t output_dim0, size_t output_dim1,
T *input_addr, S *ids_addr, T *output_addr) {
for (int input_index = blockIdx.x * blockDim.x + threadIdx.x; input_index < input_dim0 * input_dim1;
input_index += blockDim.x * gridDim.x) {
size_t j = input_index / input_dim1;
size_t k = input_index % input_dim1;
S i = ids_addr[j];
if (i < 0 || i >= output_dim0) {
continue;
}
size_t output_index = i * output_dim1 + k;
MsAtomicMin(output_addr + output_index, input_addr[input_index]);
}
}
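// Each output slot is initialized to the type's maximum value so that
// MsAtomicMin over the inputs yields the per-segment minimum; segments that
// receive no input keep that maximum.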
template <typename T>
__global__ void UnsortedSegmentMinInit(size_t size, T *output_addr) {
T init_value = static_cast<T>(0);
max_val_init(&init_value);
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
output_addr[index] = init_value;
}
}
template <typename T, typename S>
void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0, size_t output_dim1, T *input_addr,
S *ids_addr, T *output_addr, hipStream_t stream, const uint32_t &device_id) {
size_t out_size = output_dim0 * output_dim1;
hipLaunchKernelGGL(( UnsortedSegmentMinInit), dim3(CUDA_BLOCKS(device_id, out_size)), dim3(CUDA_THREADS(device_id)), 0, stream, out_size,
output_addr);
size_t in_size = input_dim0 * input_dim1;
hipLaunchKernelGGL(( UnsortedSegmentMinCal), dim3(CUDA_BLOCKS(device_id, in_size)), dim3(CUDA_THREADS(device_id)), 0, stream,
input_dim0, input_dim1, output_dim0, output_dim1, input_addr, ids_addr, output_addr);
return;
}
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, double *input_addr, int *ids_addr,
double *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, double *input_addr, int64_t *ids_addr,
double *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, float *input_addr, int *ids_addr,
float *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, float *input_addr, int64_t *ids_addr,
float *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, half *input_addr, int *ids_addr, half *output_addr,
hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, half *input_addr, int64_t *ids_addr,
half *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int8_t *input_addr, int *ids_addr,
int8_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int8_t *input_addr, int64_t *ids_addr,
int8_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int16_t *input_addr, int *ids_addr,
int16_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int16_t *input_addr, int64_t *ids_addr,
int16_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int *input_addr, int *ids_addr, int *output_addr,
hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int *input_addr, int64_t *ids_addr,
int *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int64_t *input_addr, int *ids_addr,
int64_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int64_t *input_addr, int64_t *ids_addr,
int64_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint8_t *input_addr, int *ids_addr,
uint8_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint8_t *input_addr, int64_t *ids_addr,
uint8_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint16_t *input_addr, int *ids_addr,
uint16_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint16_t *input_addr, int64_t *ids_addr,
uint16_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint32_t *input_addr, int *ids_addr,
uint32_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint32_t *input_addr, int64_t *ids_addr,
uint32_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint64_t *input_addr, int *ids_addr,
uint64_t *output_addr, hipStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint64_t *input_addr, int64_t *ids_addr,
uint64_t *output_addr, hipStream_t stream, const uint32_t &device_id);
| unsorted_segment_min.cu | /**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/unsorted_segment_min.cuh"
#include <limits>
#include "include/cuda_fp16.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
template <typename T>
__device__ __forceinline__ void max_val_init(T *init_val) {
*init_val = std::numeric_limits<T>::max();
}
// Handle fp16 differently for assignment
template <>
__device__ __forceinline__ void max_val_init(half *init_val) {
*init_val = __int2half_rd(65504); // Max value for Half
}
template <typename T, typename S>
__global__ void UnsortedSegmentMinCal(size_t input_dim0, size_t input_dim1, size_t output_dim0, size_t output_dim1,
T *input_addr, S *ids_addr, T *output_addr) {
for (int input_index = blockIdx.x * blockDim.x + threadIdx.x; input_index < input_dim0 * input_dim1;
input_index += blockDim.x * gridDim.x) {
size_t j = input_index / input_dim1;
size_t k = input_index % input_dim1;
S i = ids_addr[j];
if (i < 0 || i >= output_dim0) {
continue;
}
size_t output_index = i * output_dim1 + k;
MsAtomicMin(output_addr + output_index, input_addr[input_index]);
}
}
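// Each output slot is initialized to the type's maximum value so that
// MsAtomicMin over the inputs yields the per-segment minimum; segments that
// receive no input keep that maximum.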
template <typename T>
__global__ void UnsortedSegmentMinInit(size_t size, T *output_addr) {
T init_value = static_cast<T>(0);
max_val_init(&init_value);
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
output_addr[index] = init_value;
}
}
template <typename T, typename S>
void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0, size_t output_dim1, T *input_addr,
S *ids_addr, T *output_addr, cudaStream_t stream, const uint32_t &device_id) {
size_t out_size = output_dim0 * output_dim1;
UnsortedSegmentMinInit<<<CUDA_BLOCKS(device_id, out_size), CUDA_THREADS(device_id), 0, stream>>>(out_size,
output_addr);
size_t in_size = input_dim0 * input_dim1;
UnsortedSegmentMinCal<<<CUDA_BLOCKS(device_id, in_size), CUDA_THREADS(device_id), 0, stream>>>(
input_dim0, input_dim1, output_dim0, output_dim1, input_addr, ids_addr, output_addr);
return;
}
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, double *input_addr, int *ids_addr,
double *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, double *input_addr, int64_t *ids_addr,
double *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, float *input_addr, int *ids_addr,
float *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, float *input_addr, int64_t *ids_addr,
float *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, half *input_addr, int *ids_addr, half *output_addr,
cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, half *input_addr, int64_t *ids_addr,
half *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int8_t *input_addr, int *ids_addr,
int8_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int8_t *input_addr, int64_t *ids_addr,
int8_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int16_t *input_addr, int *ids_addr,
int16_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int16_t *input_addr, int64_t *ids_addr,
int16_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int *input_addr, int *ids_addr, int *output_addr,
cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int *input_addr, int64_t *ids_addr,
int *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int64_t *input_addr, int *ids_addr,
int64_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, int64_t *input_addr, int64_t *ids_addr,
int64_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint8_t *input_addr, int *ids_addr,
uint8_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint8_t *input_addr, int64_t *ids_addr,
uint8_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint16_t *input_addr, int *ids_addr,
uint16_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint16_t *input_addr, int64_t *ids_addr,
uint16_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint32_t *input_addr, int *ids_addr,
uint32_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint32_t *input_addr, int64_t *ids_addr,
uint32_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint64_t *input_addr, int *ids_addr,
uint64_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
template CUDA_LIB_EXPORT void UnsortedSegmentMin(size_t input_dim0, size_t input_dim1, size_t output_dim0,
size_t output_dim1, uint64_t *input_addr, int64_t *ids_addr,
uint64_t *output_addr, cudaStream_t stream, const uint32_t &device_id);
|
e994bedc186471cb4d4c99426b47bb80387f36e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.h"
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
static const __device__ __constant__ uint8_t g_list2struct[16] = {0,
1,
2,
ST_FLD_BYTE,
ST_FLD_DOUBLE,
5,
ST_FLD_I16,
7,
ST_FLD_I32,
9,
ST_FLD_I64,
ST_FLD_BINARY,
ST_FLD_STRUCT,
ST_FLD_MAP,
ST_FLD_SET,
ST_FLD_LIST};
struct byte_stream_s {
const uint8_t *cur;
const uint8_t *end;
const uint8_t *base;
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
inline __device__ unsigned int getb(byte_stream_s *bs)
{
return (bs->cur < bs->end) ? *bs->cur++ : 0;
}
inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt)
{
bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur));
bs->cur += bytecnt;
}
__device__ uint32_t get_u32(byte_stream_s *bs)
{
uint32_t v = 0, l = 0, c;
do {
c = getb(bs);
v |= (c & 0x7f) << l;
l += 7;
} while (c & 0x80);
return v;
}
inline __device__ int32_t get_i32(byte_stream_s *bs)
{
uint32_t u = get_u32(bs);
return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}
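// get_u32 reads a base-128 varint; get_i32 then zigzag-decodes it:
// u = 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, ...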
__device__ void skip_struct_field(byte_stream_s *bs, int t)
{
int struct_depth = 0;
int rep_cnt = 0;
do {
if (rep_cnt != 0) {
rep_cnt--;
} else if (struct_depth != 0) {
int c;
do {
c = getb(bs);
if (!c) --struct_depth;
} while (!c && struct_depth);
if (!struct_depth) break;
t = c & 0xf;
if (!(c & 0xf0)) get_i32(bs);
}
switch (t) {
case ST_FLD_TRUE:
case ST_FLD_FALSE: break;
case ST_FLD_I16:
case ST_FLD_I32:
case ST_FLD_I64: get_u32(bs); break;
case ST_FLD_BYTE: skip_bytes(bs, 1); break;
case ST_FLD_DOUBLE: skip_bytes(bs, 8); break;
case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break;
case ST_FLD_LIST:
case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled
int c = getb(bs);
int n = c >> 4;
if (n == 0xf) n = get_u32(bs);
t = g_list2struct[c & 0xf];
if (t == ST_FLD_STRUCT)
struct_depth += n;
else
rep_cnt = n;
} break;
case ST_FLD_STRUCT: struct_depth++; break;
}
} while (rep_cnt || struct_depth);
}
#define PARQUET_BEGIN_STRUCT(fn) \
__device__ bool fn(byte_stream_s *bs) \
{ \
int fld = 0; \
for (;;) { \
int c, t, f; \
c = getb(bs); \
if (!c) break; \
f = c >> 4; \
t = c & 0xf; \
fld = (f) ? fld + f : get_i32(bs); \
switch (fld) {
#define PARQUET_FLD_ENUM(id, m, mt) \
case id: \
bs->m = (mt)get_i32(bs); \
if (t != ST_FLD_I32) return false; \
break;
#define PARQUET_FLD_INT32(id, m) \
case id: \
bs->m = get_i32(bs); \
if (t != ST_FLD_I32) return false; \
break;
#define PARQUET_FLD_STRUCT(id, m) \
case id: \
if (t != ST_FLD_STRUCT || !m(bs)) return false; \
break;
#define PARQUET_END_STRUCT() \
default: skip_struct_field(bs, t); break; \
} \
} \
return true; \
}
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader)
PARQUET_FLD_INT32(1, page.num_input_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader)
PARQUET_FLD_INT32(1, page.num_input_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2)
PARQUET_FLD_INT32(1, page.num_input_values)
PARQUET_FLD_INT32(3, page.num_rows)
PARQUET_FLD_ENUM(4, page.encoding, Encoding);
PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParsePageHeader)
PARQUET_FLD_ENUM(1, page_type, PageType)
PARQUET_FLD_INT32(2, page.uncompressed_page_size)
PARQUET_FLD_INT32(3, page.compressed_page_size)
PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader)
PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader)
PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2)
PARQUET_END_STRUCT()
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ byte_stream_s bs_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
byte_stream_s *const bs = &bs_g[threadIdx.x >> 5];
if (chunk < num_chunks) {
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) {
((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk < num_chunks) {
size_t num_values, values_found;
uint32_t data_page_count = 0;
uint32_t dictionary_page_count = 0;
int32_t max_num_pages;
int32_t num_dict_pages = bs->ck.num_dict_pages;
PageInfo *page_info;
if (!t) {
bs->base = bs->cur = bs->ck.compressed_data;
bs->end = bs->base + bs->ck.compressed_size;
bs->page.chunk_idx = chunk;
bs->page.column_idx = bs->ck.dst_col_index;
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row = 0;
bs->page.num_rows = 0;
}
num_values = bs->ck.num_values;
page_info = bs->ck.page_info;
num_dict_pages = bs->ck.num_dict_pages;
max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
values_found = 0;
SYNCWARP();
while (values_found < num_values && bs->cur < bs->end) {
int index_out = -1;
if (t == 0) {
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row += bs->page.num_rows;
bs->page.num_rows = 0;
if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0) {
switch (bs->page_type) {
case DATA_PAGE:
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.num_rows = bs->page.num_input_values;
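            // intentional fall-through: DATA_PAGE shares the bookkeeping below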
case DATA_PAGE_V2:
index_out = num_dict_pages + data_page_count;
data_page_count++;
bs->page.flags = 0;
values_found += bs->page.num_input_values;
break;
case DICTIONARY_PAGE:
index_out = dictionary_page_count;
dictionary_page_count++;
bs->page.flags = PAGEINFO_FLAGS_DICTIONARY;
break;
default: index_out = -1; break;
}
bs->page.page_data = const_cast<uint8_t *>(bs->cur);
bs->cur += bs->page.compressed_page_size;
} else {
bs->cur = bs->end;
}
}
index_out = SHFL0(index_out);
if (index_out >= 0 && index_out < max_num_pages) {
// NOTE: Assumes that sizeof(PageInfo) <= 128
if (t < sizeof(PageInfo) / sizeof(uint32_t)) {
((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t];
}
}
num_values = SHFL0(num_values);
SYNCWARP();
}
if (t == 0) {
chunks[chunk].num_data_pages = data_page_count;
chunks[chunk].num_dict_pages = dictionary_page_count;
}
}
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
* data). The index is a 32-bit integer which contains the offset of each string
* relative to the beginning of the dictionary page data.
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
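//
// Worked example of the dictionary layout this kernel indexes (little-endian
// length prefixes): bytes 03 00 00 00 'f' 'o' 'o' 05 00 00 00 'h' 'e' 'l' 'l' 'o'
// give entry 0 -> {ptr = dict + 4, count = 3} ("foo") and
// entry 1 -> {ptr = dict + 11, count = 5} ("hello").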
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ ColumnChunkDesc chunk_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
ColumnChunkDesc *const ck = &chunk_g[threadIdx.x >> 5];
if (chunk < num_chunks) {
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) {
((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk >= num_chunks) { return; }
if (!t && ck->num_dict_pages > 0 && ck->str_dict_index) {
// Data type to describe a string
nvstrdesc_s *dict_index = ck->str_dict_index;
const uint8_t *dict = ck->page_info[0].page_data;
int dict_size = ck->page_info[0].uncompressed_page_size;
int num_entries = ck->page_info[0].num_input_values;
int pos = 0, cur = 0;
for (int i = 0; i < num_entries; i++) {
int len = 0;
if (cur + 4 <= dict_size) {
len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24);
if (len >= 0 && cur + 4 + len <= dict_size) {
pos = cur;
cur = cur + 4 + len;
} else {
cur = dict_size;
}
}
// TODO: Could store 8 entries in shared mem, then do a single warp-wide store
dict_index[i].ptr = (const char *)(dict + pos + 4);
dict_index[i].count = len;
}
}
}
hipError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks,
int32_t num_chunks,
hipStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
hipLaunchKernelGGL(( gpuDecodePageHeaders), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, num_chunks);
return hipSuccess;
}
hipError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks,
int32_t num_chunks,
hipStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
hipLaunchKernelGGL(( gpuBuildStringDictionaryIndex), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, num_chunks);
return hipSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| e994bedc186471cb4d4c99426b47bb80387f36e0.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.h"
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
static const __device__ __constant__ uint8_t g_list2struct[16] = {0,
1,
2,
ST_FLD_BYTE,
ST_FLD_DOUBLE,
5,
ST_FLD_I16,
7,
ST_FLD_I32,
9,
ST_FLD_I64,
ST_FLD_BINARY,
ST_FLD_STRUCT,
ST_FLD_MAP,
ST_FLD_SET,
ST_FLD_LIST};
struct byte_stream_s {
const uint8_t *cur;
const uint8_t *end;
const uint8_t *base;
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
inline __device__ unsigned int getb(byte_stream_s *bs)
{
return (bs->cur < bs->end) ? *bs->cur++ : 0;
}
inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt)
{
bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur));
bs->cur += bytecnt;
}
__device__ uint32_t get_u32(byte_stream_s *bs)
{
uint32_t v = 0, l = 0, c;
do {
c = getb(bs);
v |= (c & 0x7f) << l;
l += 7;
} while (c & 0x80);
return v;
}
inline __device__ int32_t get_i32(byte_stream_s *bs)
{
uint32_t u = get_u32(bs);
return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}
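// get_u32 reads a base-128 varint; get_i32 then zigzag-decodes it:
// u = 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, ...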
__device__ void skip_struct_field(byte_stream_s *bs, int t)
{
int struct_depth = 0;
int rep_cnt = 0;
do {
if (rep_cnt != 0) {
rep_cnt--;
} else if (struct_depth != 0) {
int c;
do {
c = getb(bs);
if (!c) --struct_depth;
} while (!c && struct_depth);
if (!struct_depth) break;
t = c & 0xf;
if (!(c & 0xf0)) get_i32(bs);
}
switch (t) {
case ST_FLD_TRUE:
case ST_FLD_FALSE: break;
case ST_FLD_I16:
case ST_FLD_I32:
case ST_FLD_I64: get_u32(bs); break;
case ST_FLD_BYTE: skip_bytes(bs, 1); break;
case ST_FLD_DOUBLE: skip_bytes(bs, 8); break;
case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break;
case ST_FLD_LIST:
case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled
int c = getb(bs);
int n = c >> 4;
if (n == 0xf) n = get_u32(bs);
t = g_list2struct[c & 0xf];
if (t == ST_FLD_STRUCT)
struct_depth += n;
else
rep_cnt = n;
} break;
case ST_FLD_STRUCT: struct_depth++; break;
}
} while (rep_cnt || struct_depth);
}
#define PARQUET_BEGIN_STRUCT(fn) \
__device__ bool fn(byte_stream_s *bs) \
{ \
int fld = 0; \
for (;;) { \
int c, t, f; \
c = getb(bs); \
if (!c) break; \
f = c >> 4; \
t = c & 0xf; \
fld = (f) ? fld + f : get_i32(bs); \
switch (fld) {
#define PARQUET_FLD_ENUM(id, m, mt) \
case id: \
bs->m = (mt)get_i32(bs); \
if (t != ST_FLD_I32) return false; \
break;
#define PARQUET_FLD_INT32(id, m) \
case id: \
bs->m = get_i32(bs); \
if (t != ST_FLD_I32) return false; \
break;
#define PARQUET_FLD_STRUCT(id, m) \
case id: \
if (t != ST_FLD_STRUCT || !m(bs)) return false; \
break;
#define PARQUET_END_STRUCT() \
default: skip_struct_field(bs, t); break; \
} \
} \
return true; \
}
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader)
PARQUET_FLD_INT32(1, page.num_input_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader)
PARQUET_FLD_INT32(1, page.num_input_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2)
PARQUET_FLD_INT32(1, page.num_input_values)
PARQUET_FLD_INT32(3, page.num_rows)
PARQUET_FLD_ENUM(4, page.encoding, Encoding);
PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParsePageHeader)
PARQUET_FLD_ENUM(1, page_type, PageType)
PARQUET_FLD_INT32(2, page.uncompressed_page_size)
PARQUET_FLD_INT32(3, page.compressed_page_size)
PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader)
PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader)
PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2)
PARQUET_END_STRUCT()
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ byte_stream_s bs_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
byte_stream_s *const bs = &bs_g[threadIdx.x >> 5];
if (chunk < num_chunks) {
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) {
((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk < num_chunks) {
size_t num_values, values_found;
uint32_t data_page_count = 0;
uint32_t dictionary_page_count = 0;
int32_t max_num_pages;
int32_t num_dict_pages = bs->ck.num_dict_pages;
PageInfo *page_info;
if (!t) {
bs->base = bs->cur = bs->ck.compressed_data;
bs->end = bs->base + bs->ck.compressed_size;
bs->page.chunk_idx = chunk;
bs->page.column_idx = bs->ck.dst_col_index;
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row = 0;
bs->page.num_rows = 0;
}
num_values = bs->ck.num_values;
page_info = bs->ck.page_info;
num_dict_pages = bs->ck.num_dict_pages;
max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
values_found = 0;
SYNCWARP();
while (values_found < num_values && bs->cur < bs->end) {
int index_out = -1;
if (t == 0) {
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row += bs->page.num_rows;
bs->page.num_rows = 0;
if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0) {
switch (bs->page_type) {
case DATA_PAGE:
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.num_rows = bs->page.num_input_values;
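            // intentional fall-through: DATA_PAGE shares the bookkeeping below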
case DATA_PAGE_V2:
index_out = num_dict_pages + data_page_count;
data_page_count++;
bs->page.flags = 0;
values_found += bs->page.num_input_values;
break;
case DICTIONARY_PAGE:
index_out = dictionary_page_count;
dictionary_page_count++;
bs->page.flags = PAGEINFO_FLAGS_DICTIONARY;
break;
default: index_out = -1; break;
}
bs->page.page_data = const_cast<uint8_t *>(bs->cur);
bs->cur += bs->page.compressed_page_size;
} else {
bs->cur = bs->end;
}
}
index_out = SHFL0(index_out);
if (index_out >= 0 && index_out < max_num_pages) {
// NOTE: Assumes that sizeof(PageInfo) <= 128
if (t < sizeof(PageInfo) / sizeof(uint32_t)) {
((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t];
}
}
num_values = SHFL0(num_values);
SYNCWARP();
}
if (t == 0) {
chunks[chunk].num_data_pages = data_page_count;
chunks[chunk].num_dict_pages = dictionary_page_count;
}
}
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
* data). The index is a 32-bit integer which contains the offset of each string
* relative to the beginning of the dictionary page data.
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
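//
// Worked example of the dictionary layout this kernel indexes (little-endian
// length prefixes): bytes 03 00 00 00 'f' 'o' 'o' 05 00 00 00 'h' 'e' 'l' 'l' 'o'
// give entry 0 -> {ptr = dict + 4, count = 3} ("foo") and
// entry 1 -> {ptr = dict + 11, count = 5} ("hello").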
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ ColumnChunkDesc chunk_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
ColumnChunkDesc *const ck = &chunk_g[threadIdx.x >> 5];
if (chunk < num_chunks) {
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) {
((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk >= num_chunks) { return; }
if (!t && ck->num_dict_pages > 0 && ck->str_dict_index) {
// Data type to describe a string
nvstrdesc_s *dict_index = ck->str_dict_index;
const uint8_t *dict = ck->page_info[0].page_data;
int dict_size = ck->page_info[0].uncompressed_page_size;
int num_entries = ck->page_info[0].num_input_values;
int pos = 0, cur = 0;
for (int i = 0; i < num_entries; i++) {
int len = 0;
if (cur + 4 <= dict_size) {
len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24);
if (len >= 0 && cur + 4 + len <= dict_size) {
pos = cur;
cur = cur + 4 + len;
} else {
cur = dict_size;
}
}
// TODO: Could store 8 entries in shared mem, then do a single warp-wide store
dict_index[i].ptr = (const char *)(dict + pos + 4);
dict_index[i].count = len;
}
}
}
cudaError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks,
int32_t num_chunks,
cudaStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
gpuDecodePageHeaders<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks);
return cudaSuccess;
}
cudaError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks,
int32_t num_chunks,
cudaStream_t stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
gpuBuildStringDictionaryIndex<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks);
return cudaSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
51a2ef4b81fefa4039b5580845482fa15e11edae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// SIAMESE RECURRENT ARCHITECTURE FOR VISUAL TRACKING
// Version 1.0, Copyright(c) July, 2017
// Xiaqing Xu, Bingpeng Ma, Hong Chang, Xilin Chen
// Written by Xiaqing Xu
// ------------------------------------------------------------------
#include <vector>
#include <iostream>
#include "caffe/filler.hpp"
#include "caffe/layers/spatial_irnn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe{
template <typename Dtype>
__global__ void ReLUForward(const int n, Dtype* in) {
CUDA_KERNEL_LOOP(index, n) {
in[index] = in[index] > 0 ? in[index] : Dtype(0.);
}
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, Dtype* out_diff,const Dtype* top_data) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = Dtype(1.) * (top_data[index] > 0);
}
}
template <typename Dtype>
void RNNDOWNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
const int count = top[0]->count();
const Dtype* w = this->blobs_[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_copy(count, bottom_data, top_data);
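  // Down-directional IRNN recurrence over the H_ rows of the feature map,
  // computed in place on top_data: h_i = ReLU(W * h_{i-1} + x_i).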
for(int i = 0; i < H_; i++){
if(i > 0){
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, NH_, W_ * N_, NH_, Dtype(1.),
w, top_data + (i - 1) * NH_ * N_* W_ , Dtype(1.),
top_data + i * NH_ * N_* W_);
}
hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(NH_*W_*N_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
NH_ * W_ * N_, top_data + i * NH_ * N_* W_);
CUDA_POST_KERNEL_CHECK;
}
}
template <typename Dtype>
void RNNDOWNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const int count = bottom[0]->count();
const Dtype* w = this->blobs_[0]->gpu_data();
Dtype* w_diff = this->blobs_[0]->mutable_gpu_diff();
// dh
Dtype* h_diff = cache_.mutable_gpu_data();
// f'(h)
Dtype* f_diff = cache_.mutable_gpu_diff();
Dtype* hh_diff = hh_.mutable_gpu_diff();
hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, f_diff, top_data);
CUDA_POST_KERNEL_CHECK;
caffe_copy(count, top_diff, h_diff);
for(int i = H_ - 1; i >= 0; i--){
// dzdf
caffe_gpu_mul(NH_ * W_ * N_, h_diff + i * NH_ * N_ * W_,
f_diff + i * NH_ * N_ * W_, f_diff + i * NH_ * N_* W_);
// dzdhh
caffe_gpu_gemm(CblasTrans, CblasNoTrans, NH_, W_ * N_, NH_, Dtype(1.),
w, f_diff + i * NH_ * N_* W_ , Dtype(0.), hh_diff);
if(i > 0){
caffe_gpu_add(NH_ * W_ * N_, hh_diff,
h_diff + (i - 1)* NH_ * N_ * W_,
h_diff + (i - 1)* NH_ * N_ * W_);
caffe_gpu_gemm(CblasNoTrans, CblasTrans, NH_, NH_, W_ * N_, Dtype(1.),
f_diff + i * NH_ * N_ * W_ , top_data + (i - 1) * NH_ * N_ * W_,
Dtype(1.), w_diff);
}
}
if(propagate_down[0]){
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(bottom[0]->count(), f_diff, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(RNNDOWNLayer);
} // namespace caffe
| 51a2ef4b81fefa4039b5580845482fa15e11edae.cu | // ------------------------------------------------------------------
// SIAMESE RECURRENT ARCHITECTURE FOR VISUAL TRACKING
// Version 1.0, Copyright(c) July, 2017
// Xiaqing Xu, Bingpeng Ma, Hong Chang, Xilin Chen
// Written by Xiaqing Xu
// ------------------------------------------------------------------
#include <vector>
#include <iostream>
#include "caffe/filler.hpp"
#include "caffe/layers/spatial_irnn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe{
template <typename Dtype>
__global__ void ReLUForward(const int n, Dtype* in) {
CUDA_KERNEL_LOOP(index, n) {
in[index] = in[index] > 0 ? in[index] : Dtype(0.);
}
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, Dtype* out_diff,const Dtype* top_data) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = Dtype(1.) * (top_data[index] > 0);
}
}
template <typename Dtype>
void RNNDOWNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
const int count = top[0]->count();
const Dtype* w = this->blobs_[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_copy(count, bottom_data, top_data);
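  // Down-directional IRNN recurrence over the H_ rows of the feature map,
  // computed in place on top_data: h_i = ReLU(W * h_{i-1} + x_i).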
for(int i = 0; i < H_; i++){
if(i > 0){
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, NH_, W_ * N_, NH_, Dtype(1.),
w, top_data + (i - 1) * NH_ * N_* W_ , Dtype(1.),
top_data + i * NH_ * N_* W_);
}
ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(NH_*W_*N_), CAFFE_CUDA_NUM_THREADS>>>(
NH_ * W_ * N_, top_data + i * NH_ * N_* W_);
CUDA_POST_KERNEL_CHECK;
}
}
template <typename Dtype>
void RNNDOWNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const int count = bottom[0]->count();
const Dtype* w = this->blobs_[0]->gpu_data();
Dtype* w_diff = this->blobs_[0]->mutable_gpu_diff();
// dh
Dtype* h_diff = cache_.mutable_gpu_data();
// f'(h)
Dtype* f_diff = cache_.mutable_gpu_diff();
Dtype* hh_diff = hh_.mutable_gpu_diff();
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, f_diff, top_data);
CUDA_POST_KERNEL_CHECK;
caffe_copy(count, top_diff, h_diff);
for(int i = H_ - 1; i >= 0; i--){
// dzdf
caffe_gpu_mul(NH_ * W_ * N_, h_diff + i * NH_ * N_ * W_,
f_diff + i * NH_ * N_ * W_, f_diff + i * NH_ * N_* W_);
// dzdhh
caffe_gpu_gemm(CblasTrans, CblasNoTrans, NH_, W_ * N_, NH_, Dtype(1.),
w, f_diff + i * NH_ * N_* W_ , Dtype(0.), hh_diff);
if(i > 0){
caffe_gpu_add(NH_ * W_ * N_, hh_diff,
h_diff + (i - 1)* NH_ * N_ * W_,
h_diff + (i - 1)* NH_ * N_ * W_);
caffe_gpu_gemm(CblasNoTrans, CblasTrans, NH_, NH_, W_ * N_, Dtype(1.),
f_diff + i * NH_ * N_ * W_ , top_data + (i - 1) * NH_ * N_ * W_,
Dtype(1.), w_diff);
}
}
if(propagate_down[0]){
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(bottom[0]->count(), f_diff, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(RNNDOWNLayer);
} // namespace caffe
|
41e5c3aa03ae63b8cf584642e6894df8a285a028.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "box2d4r-512-4-256_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_2_5;
double __reg_2_6;
double __reg_2_7;
double __reg_2_8;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_3_5;
double __reg_3_6;
double __reg_3_7;
double __reg_3_8;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_4_5;
double __reg_4_6;
double __reg_4_7;
double __reg_4_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
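// Each fused time step consumes a halo of __halo2 columns on either side, so
// the writable region shrinks per stage: stage k may only write where
// __local_c2 is at least k * __halo2 away from both block edges. Only values
// that survive all four stages (__writeValid4) are stored back to memory.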
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
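// Row pipeline: __LOAD pulls one input row into __reg_0. __CALCk publishes it
// to shared memory, then uses the 9-tap row expressions (__CALCEXPR_0..8) to
// start one new output row and accumulate into the eight still in flight,
// rotating stage k's nine-register window; threads outside stage k's valid
// region pass the centre value through instead (out4 = reg). __STORE writes a
// row once it has cleared all four stages.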
if (__c1Id == 0)
{
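// First c1 tile: rows 0-3 are fed directly into all four stages, so the
// deeper stages see the top boundary rows unmodified; stages 2, 3 and 4 then
// start consuming the previous stage's output at rows 8, 12 and 16, and the
// first finished row (row 4) is stored once stage 4 is primed.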
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_0);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_0);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_0);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_0);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_0);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_0);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_0);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_0);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(4, __reg_4_4);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(5, __reg_4_5);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(6, __reg_4_6);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(7, __reg_4_7);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(8, __reg_4_8);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(9, __reg_4_0);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(10, __reg_4_1);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(11, __reg_4_2);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(12, __reg_4_3);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(13, __reg_4_4);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(14, __reg_4_5);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(15, __reg_4_6);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(16, __reg_4_7);
}
else
{
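// Interior c1 tile: warm up by loading the 32 overlap rows preceding this
// tile's first output; stages 2, 3 and 4 begin consuming the previous stage's
// output at rows 8, 16 and 24, and the first row stored here is row 16.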
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(16, __reg_4_7);
}
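// Steady state: each iteration of the loop below loads one new row, advances
// all four pipelines, and stores the row that finished 16 rows earlier; the
// last c1 tile then drains the pipelines against the bottom boundary.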
// re-point the shared-memory window at the first buffer half before streaming
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 16, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 16, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 16, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 16, __reg_4_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 16, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 16, __reg_4_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 16, __reg_4_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 16, __reg_4_7);
__h++;
}
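// Bottom-boundary drain: pick the epilogue matching how many rows remain in
// this tile; the repeated register arguments to __CALCk replicate the edge
// row into the window slots that would fall past the boundary.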
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_3_6 = __reg_2_6;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_3_7 = __reg_2_7;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_3_8 = __reg_2_8;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_3_7 = __reg_2_7;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_3_8 = __reg_2_8;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__CALC4(__reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_3_8 = __reg_2_8;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__CALC4(__reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__CALC4(__reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
}
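// Note: the chain of "else if (__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2)"
// branches below appears to be a generated pipeline drain. Each variant handles a
// different count of residual rows at the bottom of this block's tile, replaying the
// four fused stages (__CALC1..__CALC4) with boundary-clamped register arguments
// (the repeated-register calls) instead of loading rows past the tile edge.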
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__CALC4(__reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__CALC4(__reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__CALC4(__reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
}
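// Drain variant for 8 residual rows: one more __LOAD and one more trailing __STORE
// than the branch above.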
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__CALC4(__reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__CALC4(__reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__CALC4(__reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
}
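// Drain variant for 9 residual rows.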
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_1_5 = __reg_0;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_2_5 = __reg_1_5;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__reg_3_5 = __reg_2_5;
__CALC4(__reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__CALC4(__reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__CALC4(__reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_5);
__STORE(__h + 4, __reg_4_1);
}
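// Drain variant for 10 residual rows.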
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_1_6 = __reg_0;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_2_6 = __reg_1_6;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__reg_3_5 = __reg_2_5;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__reg_3_6 = __reg_2_6;
__CALC4(__reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__CALC4(__reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h + 4, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_6);
__STORE(__h + 5, __reg_4_2);
}
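// Drain variant for 11 residual rows.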
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_1_7 = __reg_0;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_2_7 = __reg_1_7;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__reg_3_5 = __reg_2_5;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__reg_3_6 = __reg_2_6;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__reg_3_7 = __reg_2_7;
__CALC4(__reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h + 4, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h + 5, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_7);
__STORE(__h + 6, __reg_4_3);
}
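// Drain variant for 12 residual rows (the last case of this chain).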
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_1_8 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_2_8 = __reg_1_8;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__reg_3_5 = __reg_2_5;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__reg_3_6 = __reg_2_6;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__reg_3_7 = __reg_2_7;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
__reg_3_8 = __reg_2_8;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h + 4, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h + 5, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h + 6, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_8);
__STORE(__h + 7, __reg_4_4);
}
}
else
{
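// Steady-state path: the loop below is unrolled 9 ways, matching the 9-register
// rotation (__reg_X_0..__reg_X_8) of what looks like a radius-4 stencil fused over
// four time steps; each __STORE apparently lags the corresponding __LOAD by 16 rows
// (four pipeline stages times a halo of 4).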
for (__h = 33; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 16, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 16, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 16, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 16, __reg_4_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 16, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 16, __reg_4_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 16, __reg_4_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 16, __reg_4_7);
__h++;
}
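// Tail of the 4-stage kernel above: past the unrolled loop, advance one row
// per step and return as soon as __h reaches the overlapped row count
// __side1LenOl (stores lag the loads by 16 rows = 4 fused steps x halo 4).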
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 16, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 16, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 16, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 16, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 16, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 16, __reg_4_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 16, __reg_4_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 16, __reg_4_7);
__h++;
}
}
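// kernel0_3: one temporal tile fusing __side0Len = 3 consecutive time steps
// of a dense 9x9 (radius-4) stencil. Each 512-thread block covers 488 output
// columns (plus a 12-column overlap on each side) and streams 256 output rows
// along c1; the (__c0 % 2) plane of A is read and the ((c0 + 1) % 2) plane is
// written.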
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
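// Per-thread register pipelines: __reg_0 is the freshly loaded input row, and
// stage k (__reg_k_0 .. __reg_k_8) holds the nine in-flight rows of the
// k-times-updated field as rows stream along c1.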
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_2_5;
double __reg_2_6;
double __reg_2_7;
double __reg_2_8;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_3_5;
double __reg_3_6;
double __reg_3_7;
double __reg_3_8;
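// Two shared-memory row buffers of __blockSize doubles each; __CALCSETUP
// ping-pongs between them so each __CALC stage can publish its input row
// without racing reads of the previously staged row.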
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
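// __loadValid/__updateValid guard the physical c2 range; __writeValidK
// narrows the usable column window by one halo (4 columns) per fused step,
// and only threads that survive all three stages store results.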
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
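// Stencil body: __CALCEXPR_k forms a 9-point weighted sum of the current row
// along c2 (offsets -4..4) and accumulates it into the k-th of the nine
// in-flight output rows; over nine consecutive rows this realizes the full
// 9x9 stencil, whose center weight is 0.22400.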
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
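// Prologue: prime the three pipelines. The first block along c1 owns the top
// physical boundary, so it seeds all three stages directly from rows 0..3,
// brings CALC2 online at row 8 and CALC3 at row 12, and starts storing at
// output row 4.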
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_0);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_0);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_0);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_0);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(4, __reg_3_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(5, __reg_3_5);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(6, __reg_3_6);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(7, __reg_3_7);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(8, __reg_3_8);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(9, __reg_3_0);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(10, __reg_3_1);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(11, __reg_3_2);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(12, __reg_3_3);
}
else
{
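// Non-first block: rows 0..24 are overlap re-loaded from the neighboring
// tile, so the pipelines fill silently (CALC2 joins at row 8, CALC3 at
// row 16) and only the store at offset 12 is emitted before the
// steady-state loop.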
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(12, __reg_3_3);
}
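// Point __a_sb at the second half of the double buffer before the
// steady-state loop, keeping parity with the ping-pong sequence the
// prologue left behind.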
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
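// Last block along c1: run the unrolled 9-row steady-state loop only while a
// full pipeline's worth of rows remains, then fall through to the boundary
// epilogue arms below.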
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 12, __reg_3_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 12, __reg_3_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 12, __reg_3_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 12, __reg_3_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 12, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 12, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 12, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 12, __reg_3_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
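// Epilogue: exactly one arm fires, selected by how many input rows remain in
// this block's row budget. The repeated-register __CALC calls collapse
// out-of-range outputs, clamping the stencil at the bottom boundary while
// the pipelines flush.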
if (0) {} // dead first arm; keeps the generated else-if chain uniform
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_1_5 = __reg_0;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_2_5 = __reg_1_5;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_1_6 = __reg_0;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__reg_2_6 = __reg_1_6;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_6);
__STORE(__h + 4, __reg_3_2);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_1_7 = __reg_0;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__reg_2_7 = __reg_1_7;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h + 4, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_7);
__STORE(__h + 5, __reg_3_3);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_1_8 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__reg_2_8 = __reg_1_8;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h + 4, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h + 5, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_8);
__STORE(__h + 6, __reg_3_4);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h + 4, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h + 5, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h + 6, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_5, __reg_2_0);
__STORE(__h + 7, __reg_3_5);
}
}
else
{
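/* Interior c1 tiles: steady-state software pipeline. Each trip retires
   9 rows (one full rotation of the 9-register window per stage),
   loading at row __h while storing the finished row 12 positions
   behind the load front, then swaps the shared-memory row buffer. */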
for (__h = 25; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 12, __reg_3_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 12, __reg_3_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 12, __reg_3_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 12, __reg_3_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 12, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 12, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 12, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 12, __reg_3_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
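/* Drain: emit the remaining in-flight rows one at a time, returning as
   soon as the overlapped tile height __side1LenOl is reached. */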
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 12, __reg_3_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 12, __reg_3_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 12, __reg_3_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 12, __reg_3_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 12, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 12, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 12, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 12, __reg_3_3);
__h++;
}
}
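/* kernel0_2: generated (AN5D-style, see the AN5D_TYPE guard) variant
   that fuses 2 time steps (__side0Len = 2) of a 9x9-tap, halo-4 2D
   stencil. Rows along c1 stream through two register-pipelined stages
   (__CALC1, __CALC2); columns along c2 are exchanged through the
   double-buffered shared-memory row __a_sb_double. */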
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
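/* Tile indexing: blockIdx.x enumerates (c1 tile, c2 tile) pairs; each
   thread owns one column __c2 of a tile that is widened by
   __OlLen2 = __halo2 * __side0Len columns on each side so the fused
   time steps still see valid neighbours. */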
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_2_5;
double __reg_2_6;
double __reg_2_7;
double __reg_2_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
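/* Validity predicates: loads are allowed anywhere in the halo-extended
   column range; a stage-k result is trustworthy only for threads with
   k halos of valid columns on both sides, so only rows passing the
   final stage (__writeValid2) are stored. */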
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0)
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0)
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0)
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0)
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
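/* Pipeline macros: __LOAD reads one input row into a register;
   __CALCSETUP publishes it to the shared row buffer; __CALC1/__CALC2
   accumulate the 9 row contributions of the two fused stencil
   applications into 9 rotating registers each; __STORE commits a row
   whose accumulation has completed. Where a stage is masked off, the
   centre register (out4) simply forwards the raw input. */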
if (__c1Id == 0)
{
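/* First tile along c1: halo rows 0..3 seed both stages unmodified (the
   top boundary passes through), interior rows then fill the stage-1
   window, and the first fully updated row (4) is stored once row 12
   has been consumed. */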
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(4, __reg_2_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(5, __reg_2_5);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(6, __reg_2_6);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(7, __reg_2_7);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(8, __reg_2_8);
}
else
{
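/* Interior tiles along c1: no boundary handling needed. Rows 0..8
   prime stage 1, stage 2 starts once its 9-row window is full, and the
   first finished row (8) is stored after row 16 has been loaded. */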
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(8, __reg_2_8);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
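/* Last tile along c1: run the steady loop while at least 13 rows
   (including the bottom halo) remain, then fall through to the
   residual cases below, which clamp reads past the bottom edge. */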
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 8, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 8, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 8, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 8, __reg_2_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 8, __reg_2_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 8, __reg_2_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 8, __reg_2_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h - 8, __reg_2_8);
__h++;
}
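/* The empty `if (0) {}` is a generated dispatch stub so that every
   residual row count can be handled by a uniform `else if` chain. */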
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__reg_1_5 = __reg_0;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__reg_1_6 = __reg_0;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__reg_1_7 = __reg_0;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_7);
__STORE(__h + 4, __reg_2_3);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__reg_1_8 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h + 4, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_8);
__STORE(__h + 5, __reg_2_4);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h + 4, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h + 5, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_5, __reg_1_0);
__STORE(__h + 6, __reg_2_5);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h + 4, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h + 5, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h + 6, __reg_2_5);
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_6, __reg_1_1);
__STORE(__h + 7, __reg_2_6);
}
}
else
{
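/* Interior tiles: no boundary clamping is needed, so the steady-state loop
   below runs a fixed 9-row register rotation and the unrolled tail simply
   exits once the overlapped tile height is consumed. */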
for (__h = 17; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 8, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 8, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 8, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 8, __reg_2_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 8, __reg_2_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 8, __reg_2_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 8, __reg_2_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h - 8, __reg_2_8);
__h++;
}
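/* Unrolled loop tail: after each remaining row, return as soon as __h
   reaches the overlapped tile height __side1LenOl. */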
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 8, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 8, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 8, __reg_2_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 8, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 8, __reg_2_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 8, __reg_2_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 8, __reg_2_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h - 8, __reg_2_8);
__h++;
}
}
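/*
 * kernel0_1 -- one temporal step (single-stage variant) of what appears to
 * be a radius-4 2D box stencil (9x9 taps whose coefficients sum to 1.0)
 * over two dimsize x dimsize planes ping-ponged inside A.
 * A minimal host-side launch sketch, assuming the tiling implied by the
 * constants below (256-row x 504-column tiles, 512 threads per block);
 * the actual generated driver may differ:
 *
 *   AN5D_TYPE side1Num = (dimsize - 8 + 256 - 1) / 256;
 *   AN5D_TYPE side2Num = (dimsize - 8 + 504 - 1) / 504;
 *   dim3 grid(side1Num * side2Num), block(512, 1);
 *   kernel0_1<<<grid, block>>>(A, dimsize, timestep, c0);
 */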
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
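/* Thread/block mapping: each thread owns one column __c2; blockIdx.x
   enumerates (row tile __c1Id, column tile) pairs. __OlLen1/__OlLen2 =
   __halo * __side0Len is the overlap consumed on each tile side. */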
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
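/* Double-buffered shared-memory row: __DB_SWITCH() flips __a_sb between
   the two halves of __a_sb_double so the next row can be staged while
   __SBREF still reads the current one. */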
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
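/* __loadValid: column lies inside the padded domain including the halo.
   __updateValid: column is a real domain point. __writeValid1/__storeValid:
   additionally, the thread is not in the tile's overlap region. */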
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
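/* __LOAD reads row h from the current time plane (the (__c0 % 2) half of
   A); __STORE, defined after the stencil macros, writes the other plane,
   giving a simple ping-pong over time steps. */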
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
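/* __CALCEXPR_k is the 9-tap horizontal FIR for neighborhood row k (row
   offsets -4..+4 in c1); the deeply nested parentheses are as emitted by
   the generator. Row 4 is the center row, whose middle tap carries the
   dominant weight (0.22400f). */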
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
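/* __CALC1 stages one input row into shared memory and scatters its nine
   row contributions (__CALCEXPR_0..8) into nine rotating accumulator
   registers; an output value is complete once 2 * __halo1 + 1 = 9
   consecutive rows have passed through, so __STORE trails __LOAD by
   __halo1 = 4 rows in the steady state. */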
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
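/* Warm-up done: rows 0..8 are staged and row 4 is stored. The two branches
   above are textually identical here; the generator presumably keeps the
   split for configurations where the first tile needs top boundary
   clamping. Flip to the second shared-memory buffer before entering the
   steady-state loop. */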
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
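/* Epilogue for the last tile: the "if (0) {}" chain below picks the
   remainder case (4..12 rows left) and drains the pipeline with the
   register window clamped at the bottom boundary. */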
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__STORE(__h + 7, __reg_1_7);
}
}
else
{
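// Interior tile: steady-state streaming loop, unrolled 9x so the 9-slot register
// pipeline rotates in place; every loaded row __h completes (and stores) row __h - 4.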
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
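// Drain: finish the remaining rows one at a time, returning as soon as the
// overlapped tile height __side1LenOl is reached.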
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
}
}
// 41e5c3aa03ae63b8cf584642e6894df8a285a028.cu
#include "box2d4r-512-4-256_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
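// kernel0_4: generated 9x9 (radius-4) box-stencil pass with 4 fused time steps
// (__side0Len = 4). Each of the 512 threads owns one column; stage s (s = 1..4)
// keeps its 9 most recent output rows in registers __reg_s_0..__reg_s_8, and the
// row currently exchanged between neighboring columns lives in shared memory.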
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
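// Row pipelines: __reg_0 is the freshly loaded input row; __reg_s_k holds one of
// the nine partially accumulated rows of time-step stage s.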
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_2_5;
double __reg_2_6;
double __reg_2_7;
double __reg_2_8;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_3_5;
double __reg_3_6;
double __reg_3_7;
double __reg_3_8;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_4_5;
double __reg_4_6;
double __reg_4_7;
double __reg_4_8;
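// Double-buffered shared-memory row: __DB_SWITCH flips buffers so a thread can
// publish its new value while neighbors may still read the previous row.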
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
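// Generated macro family: __LOAD fetches one input element per thread;
// __CALCEXPR_r adds the r-th of nine per-row stencil contributions (9 column taps
// each, read from shared memory for the +/-4 neighbors); __CALCk runs one fused
// time step inside its shrinking write window (__writeValidk); __STORE commits a
// finished row once all four stages are valid.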
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
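// First tile along c1 (__c1Id == 0): warm up all four stages against the top
// boundary; the first fully computed row is stored at c1 offset 4.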
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_0);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_0);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_0);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_0);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_0);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_0);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_0);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_0);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(4, __reg_4_4);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(5, __reg_4_5);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(6, __reg_4_6);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(7, __reg_4_7);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(8, __reg_4_8);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(9, __reg_4_0);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(10, __reg_4_1);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(11, __reg_4_2);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(12, __reg_4_3);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(13, __reg_4_4);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(14, __reg_4_5);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(15, __reg_4_6);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(16, __reg_4_7);
}
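// Interior and last tiles: identical warm-up, but rows 0..3 come from the halo
// overlap with the previous tile, so only row 16 is stored before the main loop.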
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(16, __reg_4_7);
}
__a_sb = __a_sb_double + __blockSize * 0;
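// Last tile along c1: the main loop stops 13 rows before the tile end so the
// epilogue below can flush the partially filled pipelines against the bottom halo.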
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 16, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 16, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 16, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 16, __reg_4_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 16, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 16, __reg_4_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 16, __reg_4_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 16, __reg_4_7);
__h++;
}
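// Bottom epilogue: one case per residue (the if (0) {} head merely anchors the
// generated else-if chain). Raw boundary rows appear to be parked for reuse
// (e.g. __reg_1_6 = __reg_0) so stages 2..4 can be flushed in turn after stage 1
// runs dry.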
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_3_6 = __reg_2_6;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_3_7 = __reg_2_7;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_3_8 = __reg_2_8;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_3_7 = __reg_2_7;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_3_8 = __reg_2_8;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__CALC4(__reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_3_8 = __reg_2_8;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__CALC4(__reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__CALC4(__reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__CALC4(__reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__CALC4(__reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__CALC4(__reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_5, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__CALC4(__reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__CALC4(__reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__CALC4(__reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_1_5 = __reg_0;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_2_5 = __reg_1_5;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__reg_3_5 = __reg_2_5;
__CALC4(__reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_6, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__CALC4(__reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__CALC4(__reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_5);
__STORE(__h + 4, __reg_4_1);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_1_6 = __reg_0;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_2_6 = __reg_1_6;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__reg_3_5 = __reg_2_5;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__reg_3_6 = __reg_2_6;
__CALC4(__reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_7, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__CALC4(__reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h + 4, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_6);
__STORE(__h + 5, __reg_4_2);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_1_7 = __reg_0;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_2_7 = __reg_1_7;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__reg_3_5 = __reg_2_5;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__reg_3_6 = __reg_2_6;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__reg_3_7 = __reg_2_7;
__CALC4(__reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_8, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h + 4, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h + 5, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_7);
__STORE(__h + 6, __reg_4_3);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 15, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 14, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 13, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 12, __reg_4_3);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 11, __reg_4_4);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 10, __reg_4_5);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 9, __reg_4_6);
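      // Bottom-edge drain (a reading of the generated pattern): from here the
      // duplicated register arguments in the __CALCk calls stand in for rows
      // past the tile edge, and raw loaded rows are parked in spare registers
      // (e.g. __reg_1_5 = __reg_0) so the later stages can finish.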
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 8, __reg_4_7);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 7, __reg_4_8);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 6, __reg_4_0);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 5, __reg_4_1);
__reg_1_8 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 4, __reg_4_2);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 3, __reg_4_3);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 2, __reg_4_4);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 1, __reg_4_5);
__reg_2_8 = __reg_1_8;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h + 0, __reg_4_6);
__reg_3_5 = __reg_2_5;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h + 1, __reg_4_7);
__reg_3_6 = __reg_2_6;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h + 2, __reg_4_8);
__reg_3_7 = __reg_2_7;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h + 3, __reg_4_0);
__reg_3_8 = __reg_2_8;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h + 4, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h + 5, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h + 6, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_8);
__STORE(__h + 7, __reg_4_4);
}
}
else
{
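      // Steady state: 9-way unrolled streaming loop, one iteration per full
      // rotation of the nine row registers; each store trails its load by
      // 16 rows (four pipeline stages x halo 4).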
for (__h = 33; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 16, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 16, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 16, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 16, __reg_4_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 16, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 16, __reg_4_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 16, __reg_4_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 16, __reg_4_7);
__h++;
}
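      // Remainder: finish one row per step, returning once __h reaches the
      // overlapped tile height __side1LenOl.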
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__CALC4(__reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_3_3);
__STORE(__h - 16, __reg_4_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__CALC4(__reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_4);
__STORE(__h - 16, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_5);
__STORE(__h - 16, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_6);
__STORE(__h - 16, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_3_7);
__STORE(__h - 16, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_4_4, __reg_3_8);
__STORE(__h - 16, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_4_5, __reg_3_0);
__STORE(__h - 16, __reg_4_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__CALC4(__reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_4_6, __reg_3_1);
__STORE(__h - 16, __reg_4_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__CALC4(__reg_4_6, __reg_4_5, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_8, __reg_4_7, __reg_3_2);
__STORE(__h - 16, __reg_4_7);
__h++;
}
}
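// kernel0_3: streaming stencil kernel that fuses __side0Len = 3 time steps of
// a radius-4 (9x9-point) double-precision 2D stencil. blockIdx.x is
// linearized over (c1-tile, c2-tile); each thread owns one column while rows
// are streamed through a 3-stage register pipeline. A holds two time planes
// selected by c0 % 2, and results are written to the (c0 + 1) % 2 plane.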
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
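  // Each block covers one c1-tile of 256 rows by one c2-tile of 488 columns,
  // plus a 12-column overlap (__halo2 * __side0Len) on each side, giving
  // __blockSize = 512 threads.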
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_2_5;
double __reg_2_6;
double __reg_2_7;
double __reg_2_8;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_3_5;
double __reg_3_6;
double __reg_3_7;
double __reg_3_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
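  // __a_sb_double holds two halves used as a double buffer: __DB_SWITCH
  // (defined below) flips __a_sb before each row is staged, so only one
  // __syncthreads per staged row is needed.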
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
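  // A thread may apply step k only if its column lies at least k * __halo2
  // columns inside the overlapped tile; only columns valid through all three
  // steps (__writeValid3) are stored.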
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
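// Pipeline macros: __LOAD fetches one input row into a register; __CALCk
// stages the row in shared memory and accumulates time-step k's nine-tap
// horizontal expressions into nine rotating row registers (out4 degrades to
// a plain copy wherever the thread cannot validly compute); __STORE writes a
// finished row to the output plane.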
if (__c1Id == 0)
{
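    // Top-boundary tile: rows 0..3 seed all three stages directly; __CALC2
    // resumes at row 8 and __CALC3 at row 12, once enough upstream rows are
    // complete, and the first store lands at row offset 4.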
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_0);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_0);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_0);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_0);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
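    // First complete row: from here on, stores trail loads by
    // __halo1 * __side0Len = 12 rows.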
__STORE(4, __reg_3_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(5, __reg_3_5);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(6, __reg_3_6);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(7, __reg_3_7);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(8, __reg_3_8);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(9, __reg_3_0);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(10, __reg_3_1);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(11, __reg_3_2);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(12, __reg_3_3);
}
else
{
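    // Non-top tile: the 12-row overlap with the previous tile is re-loaded
    // and recomputed; the first row this tile actually owns is stored at
    // offset 12.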
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(12, __reg_3_3);
}
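  // Force the shared-buffer pointer onto a known half before streaming,
  // regardless of which priming branch ran above.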
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
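    // Bottom-boundary tile: stream in 9-row rounds (stores trail loads by 12
    // rows), then the else-if chain below drains the pipeline for however
    // many rows remain before the bottom edge.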
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 12, __reg_3_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 12, __reg_3_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 12, __reg_3_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 12, __reg_3_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 12, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 12, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 12, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 12, __reg_3_3);
__h++;
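      // The nine rows above run 27 __CALCSETUPs, an odd number of buffer
      // flips; this extra __DB_SWITCH appears to restore the buffer parity
      // expected at the top of the loop, with a barrier before the next round.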
__DB_SWITCH(); __syncthreads();
}
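    // Epilogue dispatch: `if (0) {}` lets the generator emit a uniform
    // else-if chain; each branch handles one possible remainder of rows,
    // clamping past-the-edge rows via duplicated register arguments.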
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_5, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_1_5 = __reg_0;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_2_5 = __reg_1_5;
__CALC3(__reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_6, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_1_6 = __reg_0;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__reg_2_6 = __reg_1_6;
__CALC3(__reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_7, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_6);
__STORE(__h + 4, __reg_3_2);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_1_7 = __reg_0;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__reg_2_7 = __reg_1_7;
__CALC3(__reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_8, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h + 4, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_7);
__STORE(__h + 5, __reg_3_3);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_1_8 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_2_5 = __reg_1_5;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__reg_2_8 = __reg_1_8;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h + 4, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h + 5, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_8);
__STORE(__h + 6, __reg_3_4);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 11, __reg_3_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 10, __reg_3_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 9, __reg_3_7);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 8, __reg_3_8);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 7, __reg_3_0);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 6, __reg_3_1);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 5, __reg_3_2);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 4, __reg_3_3);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 3, __reg_3_4);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 2, __reg_3_5);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 1, __reg_3_6);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h + 0, __reg_3_7);
__reg_2_6 = __reg_1_6;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h + 1, __reg_3_8);
__reg_2_7 = __reg_1_7;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h + 2, __reg_3_0);
__reg_2_8 = __reg_1_8;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h + 3, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h + 4, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h + 5, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h + 6, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_5, __reg_2_0);
__STORE(__h + 7, __reg_3_5);
}
}
else
{
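    // Full-height tiles: steady-state loop, unrolled 9x so one pass rotates each
    // 9-slot register ring exactly once. Every stored row trails the row being
    // loaded by 12 (the 4-row stencil halo times the three fused time steps).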
for (__h = 25; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 12, __reg_3_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 12, __reg_3_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 12, __reg_3_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 12, __reg_3_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 12, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 12, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 12, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 12, __reg_3_3);
__h++;
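      // One extra flip on top of the 27 made inside __CALCSETUP this pass,
      // presumably to restore the shared-memory double-buffer parity before the
      // next pass begins.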
__DB_SWITCH(); __syncthreads();
}
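    // Pipeline drain: continue one row at a time, returning as soon as the
    // overlapped row extent __side1LenOl is exhausted.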
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_2_8);
__STORE(__h - 12, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_2_0);
__STORE(__h - 12, __reg_3_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__CALC3(__reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_2_1);
__STORE(__h - 12, __reg_3_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__CALC3(__reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_2_2);
__STORE(__h - 12, __reg_3_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__CALC3(__reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_2_3);
__STORE(__h - 12, __reg_3_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__CALC3(__reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_4);
__STORE(__h - 12, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_5);
__STORE(__h - 12, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_6);
__STORE(__h - 12, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_8, __reg_3_7, __reg_3_6, __reg_3_5, __reg_3_4, __reg_3_3, __reg_2_7);
__STORE(__h - 12, __reg_3_3);
__h++;
}
}
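// kernel0_2: 81-tap (9x9) radius-4 stencil, fusing two time steps per sweep
// (__side0Len = 2). Tile: 256 rows (c1) x 496 columns (c2), padded by
// halo * side0Len = 8 on each side, so one 512-thread block spans a padded
// c2 row.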
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
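    // Two 9-deep register rings, one per fused time step: __reg_1_* holds the
    // rows in flight after stencil step 1, __reg_2_* after step 2.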
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_2_5;
double __reg_2_6;
double __reg_2_7;
double __reg_2_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
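    // The current input row lives in shared memory, double-buffered:
    // __DB_SWITCH() flips __a_sb between the two halves so storing the next row
    // cannot race with threads still reading the previous one as column
    // neighbors.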
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
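    // Halo bookkeeping along c2: __loadValid admits the 4-column read halo,
    // __updateValid the physical domain, and __writeValidK shrinks the writable
    // span by K halo widths (each fused step consumes 4 columns of margin).
    // Only depth-2 results are committed to memory.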
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
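// __CALCEXPR_k forms the 9-tap horizontal sum of the incoming row with row k
// of the 9x9 coefficient table; row 0 initializes its output, rows 1..8
// accumulate (out += etmp). Coefficients are float literals promoted to
// double, with the dominant center tap (row 4, offset 0) weighted 0.224.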
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
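// __CALC1/__CALC2 apply one stencil step only where this column may legally
// write at that pipeline depth; elsewhere the raw input is passed through
// the center slot (out4 = reg) so deeper steps still see valid halo data.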
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
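  // First tile along c1: prime both register rings from the physical top
  // boundary; the first row committed by __STORE is c1 = 4, the halo width.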
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(4, __reg_2_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(5, __reg_2_5);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(6, __reg_2_6);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(7, __reg_2_7);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(8, __reg_2_8);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(8, __reg_2_8);
}
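// Re-anchor the shared-memory double buffer. Both prologue branches appear
// to toggle __DB_SWITCH an even number of times, leaving the active half at
// offset 0; making that explicit keeps the loops below independent of which
// branch ran.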
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 8, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 8, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 8, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 8, __reg_2_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 8, __reg_2_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 8, __reg_2_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 8, __reg_2_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h - 8, __reg_2_8);
__h++;
}
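// Remainder dispatch for the bottom boundary tile. `if (0) {}` is a
// generator idiom that lets every real case be an `else if`. Each case
// drains the pipeline for one specific residue of remaining planes; the
// repeated-register argument patterns (e.g. __reg_1_3 passed for several
// neighbor slots) appear to route contributions for rows outside the tile
// into scratch registers that are never stored.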
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__reg_1_5 = __reg_0;
__CALC2(__reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_6, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__reg_1_6 = __reg_0;
__CALC2(__reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_7, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__reg_1_7 = __reg_0;
__CALC2(__reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_8, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_7);
__STORE(__h + 4, __reg_2_3);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__reg_1_5 = __reg_0;
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__reg_1_8 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h + 4, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_8);
__STORE(__h + 5, __reg_2_4);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__reg_1_6 = __reg_0;
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h + 4, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h + 5, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_5, __reg_1_0);
__STORE(__h + 6, __reg_2_5);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 7, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 6, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 5, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 4, __reg_2_4);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 3, __reg_2_5);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 2, __reg_2_6);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 1, __reg_2_7);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h + 0, __reg_2_8);
__reg_1_7 = __reg_0;
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h + 1, __reg_2_0);
__reg_1_8 = __reg_0;
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h + 2, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h + 3, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h + 4, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h + 5, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h + 6, __reg_2_5);
__CALC2(__reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_5, __reg_2_6, __reg_1_1);
__STORE(__h + 7, __reg_2_6);
}
}
else
{
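// Interior tiles: steady-state loop, unrolled 9x so that after one trip
// every __reg_1_*/__reg_2_* name is back in its starting role and no
// register moves are needed between trips.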
for (__h = 17; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 8, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 8, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 8, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 8, __reg_2_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 8, __reg_2_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 8, __reg_2_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 8, __reg_2_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h - 8, __reg_2_8);
__h++;
}
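// Epilogue: up to eight trailing planes are processed one at a time, each
// guarded by an early return once the overlapped tile height __side1LenOl
// is exhausted.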
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__CALC2(__reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_4);
__STORE(__h - 8, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__CALC2(__reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_5);
__STORE(__h - 8, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_6);
__STORE(__h - 8, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_1_7);
__STORE(__h - 8, __reg_2_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_1_8);
__STORE(__h - 8, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_2_5, __reg_1_0);
__STORE(__h - 8, __reg_2_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_2_6, __reg_1_1);
__STORE(__h - 8, __reg_2_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_2_7, __reg_1_2);
__STORE(__h - 8, __reg_2_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_7, __reg_2_6, __reg_2_5, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_8, __reg_1_3);
__STORE(__h - 8, __reg_2_8);
__h++;
}
}
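// kernel0_1: single-timestep variant of the same 9x9 star stencil (one
// __CALC stage instead of two; stores trail the load front by one halo of 4
// rows rather than two). A minimal host-side launch sketch consistent with
// the index arithmetic below; `d_A`, `dimsize`, `timestep`, and `c0` are
// illustrative names, and the real AN5D driver code is generated elsewhere:
//
//     const unsigned side1Num = ((dimsize - 8) + 256 - 1) / 256;
//     const unsigned side2Num = ((dimsize - 8) + 504 - 1) / 504;
//     dim3 grid(side1Num * side2Num, 1, 1);
//     dim3 block(512, 1, 1);        // __blockSize = __side2LenOl = 512
//     kernel0_1<<<grid, block>>>(d_A, dimsize, timestep, c0);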
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
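// Tile decomposition: blockIdx.x enumerates (row-tile, column-tile) pairs.
// Each thread owns one column (__c2) of a 256-row by 504-column tile,
// extended by a 4-wide halo on each side, giving __side2LenOl = 512 threads
// per block.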
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
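// Validity predicates: __loadValid admits reads from the 4-wide column halo,
// __updateValid restricts to interior columns of the grid, and __writeValid1
// further excludes the outermost 4 threads of the overlapped block, whose
// shared-memory neighborhoods are incomplete. With __side0Len == 1 there is
// a single write stage, so __storeValid is just __writeValid1.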
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
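// __LOAD reads row h of the current time plane (__c0 % 2) of A; __DEST
// addresses the (c0 + 1) plane, so A is ping-ponged in time. Each
// __CALCEXPR_k macro below accumulates one stencil row's nine column taps
// (coefficients 0.00930f through 0.01010f, with the 0.22400f center tap in
// __CALCEXPR_4) into the k-th accumulator of the rotation, so each output
// row gathers all nine stencil rows as nine successive planes pass through.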
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
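// Prologue: prime the rotation with 2 * __halo1 + 1 = 9 input planes and
// emit the first store at row 4. The generator emits identical bodies for
// the top tile (__c1Id == 0) and interior tiles here, since only one stage
// needs priming.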
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
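// Nine __CALCSETUP calls (an odd count of __DB_SWITCH toggles) leave the
// active buffer at offset __blockSize; re-anchor the pointer there
// explicitly.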
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__STORE(__h + 7, __reg_1_7);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
}
}
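// Editor's note (illustrative sketch, not emitted by the stencil generator):
// the unrolled __LOAD/__CALC1/__STORE sequences above implement a software
// register pipeline for a radius-4 (9-plane) stencil along dimension 1.
// Assuming a hypothetical per-window stencil f and plane loader load(), the
// steady state is equivalent to:
//
//   T reg[9];                                      // rotating register pipeline
//   for (int h = 0; h < 9; ++h) reg[h] = load(h);  // prologue: prime 9 planes
//   store(4, f(reg));                              // first full window -> plane 4
//   for (int h = 9; h < len; ++h) {
//     reg[h % 9] = load(h);                        // rotate in the newest plane
//     store(h - 4, f(reg));                        // radius-4 window centred at h - 4
//   }
//
// The generator names every rotation explicitly (__reg_1_0 ... __reg_1_8)
// instead of indexing an array so each plane stays in a hardware register;
// the boundary branches above pass duplicated registers to flush the
// pipeline at block edges.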
|
a68bd5f6490e57e0741ce998da337cf7221a4b7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <THH/THHTensorMathPointwise.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <bitset>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
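// Illustrative note (editor's addition, not part of the original file):
// Xcoo2csr turns the sorted COO row indices of a coalesced matrix into CSR
// row offsets, where csr[i] counts the nonzeros in rows 0..i-1. For example,
// with dim = 4 and rowIndices = [0, 0, 1, 3] (nnz = 4), the result is
// csr = [0, 2, 3, 3, 4]: row 0 owns entries [0, 2), row 1 owns [2, 3),
// row 2 is empty, and row 3 owns [3, 4).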
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
// No half support, so we don't have to use CUDATypeConversion
Tensor r__;
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
/* r_ */
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
if (nnz > 0) {
/* dense */
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
csr.data_ptr<int32_t>(),
colIndicesInt.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
}
});
r_.copy_(r__);
return r_;
}
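// Illustrative usage sketch (editor's addition; shapes m, k, n and the input
// tensors are assumed, not taken from the original file). The routine above
// computes r = beta * t + alpha * (sparse @ dense) through the csrmm2
// wrapper, and only float/double values are dispatched (no half):
//
//   auto sp = at::sparse_coo_tensor(indices, values, {m, k}).cuda();
//   auto dn = at::rand({k, n}, at::kCUDA);
//   auto t  = at::zeros({m, n}, at::kCUDA);
//   auto r  = at::addmm(t, sp, dn, /*beta=*/1.0, /*alpha=*/1.0);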
Tensor& addmm_out_sparse_dense_cuda(
Tensor& result,
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
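// Illustrative note (editor's addition, semantics inferred from the code
// above): hspmm returns a hybrid tensor with sparse_dim = 1 and
// dense_dim = 1. Each nonzero (r, c, v) of the coalesced (m x k) input
// contributes one output row: indices has shape [1, nnz] holding r, and
// values has shape [nnz, n] holding v * dense[c, :]. Duplicate row ids are
// summed by a later coalesce, so the result equals the full product while
// storing at most nnz * n values instead of m * n.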
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, at::Scalar value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
Tensor r = r_;
if (!is_same_tensor(r, dense)) {
r_.resize_as_(dense);
r_.copy_(dense);
} else {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
r = r_.contiguous();
}
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (sparse._values().numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
}
} else {
LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
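    // Illustrative example (editor's addition): flatten_indices linearizes the
    // sparse indices row-major over sparse.sizes(), e.g. for sizes (3, 4) the
    // index (i, j) maps to i * 4 + j, so indices [[0, 1, 2], [2, 0, 3]] become
    // [2, 4, 11]; index_add_ below then scatters each value row into the
    // flattened view of r.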
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(hipGetLastError());
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, Scalar value);
SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
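  // Illustrative sketch (editor's addition): for t = {indices [[0, 2]],
  // values [1., 2.]} and src = {indices [[2, 3]], values [10., 20.]}, the
  // concatenation below yields {indices [[0, 2, 2, 3]],
  // values [1., 2., 10., 20.]} with is_coalesced() == false; a later
  // coalesce() would merge the duplicate index 2 into a single entry 12.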
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, s_values_.scalar_type(), "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
r_.resize_as_(src);
alias_into_sparse(r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
// THCSTensor_(contiguous)(r);
// r->minUnique = r->nnz;
// }
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
r_.resize_as_(src);
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, t_values_.scalar_type(), "mul_out_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(hipGetLastError());
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(hipGetLastError());
});
// sync! (surely there is a more idiomatic way to do this...)
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
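// Illustrative note (editor's addition): the two kernels above compute an
// indexwise intersection, so multiplication only keeps indices present in
// both operands. E.g. t = {(0): 2., (2): 3.} times src = {(2): 4., (5): 1.}
// yields {(2): 12.} with resultNnz = 1, which is why
// max_nnz = min(t_nnz, s_nnz) is a safe allocation bound.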
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
LongTensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options());
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
LongTensor input_indices_pos = at::empty_like(input_indices_1D);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
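      // Illustrative example (editor's addition): with grad_indices_1D =
      // [0, 2, 5] and input_indices_1D = [0, 1, 2, 5], lower_bound fills
      // input_indices_pos = [0, 1, 1, 2]; the kernel launched below then
      // copies grad row pos[i] into output row i only where
      // grad_indices[pos[i]] == input_indices[i] (i = 0, 2, 3 here) and
      // zero-fills the unmatched row (i = 1).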
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
}} // namespace at::native
|
a68bd5f6490e57e0741ce998da337cf7221a4b7a.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <THC/THCTensorMathPointwise.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <bitset>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
// No half support, so we don't have to use CUDATypeConversion
Tensor r__;
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
/* r_ */
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
if (nnz > 0) {
/* dense */
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
csr.data_ptr<int32_t>(),
colIndicesInt.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
}
});
r_.copy_(r__);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
Tensor& result,
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, at::Scalar value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
Tensor r = r_;
if (!is_same_tensor(r, dense)) {
r_.resize_as_(dense);
r_.copy_(dense);
} else {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
r = r_.contiguous();
}
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (sparse._values().numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r_), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
}
} else {
LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, values.scalar_type(), "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(cudaGetLastError());
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, Scalar value);
SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, s_values_.scalar_type(), "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
r_.resize_as_(src);
alias_into_sparse(r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
// THCSTensor_(contiguous)(r);
// r->minUnique = r->nnz;
// }
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
LongTensor t_indices_ = t._indices();
Tensor t_values_ = t._values();
LongTensor s_indices_ = src._indices();
Tensor s_values_ = src._values();
LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
r_.resize_as_(src);
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
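  // Two-phase intersection: the value kernel multiplies values at matching
  // indices in parallel, then a single-thread index kernel compacts the
  // matching indices and records the result nnz.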
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, t_values_.scalar_type(), "mul_out_sparse_cuda", [&] {
apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(cudaGetLastError());
apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
<<<1, 1, 0, stream>>>(
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(cudaGetLastError());
});
// sync! (surely there is a more idiomatic way to do this...)
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
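  // j is where this input index would land in the sorted grad indices
  // (precomputed via lower_bound); a gradient row exists only on an exact match.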
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
LongTensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options());
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
LongTensor input_indices_pos = at::empty_like(input_indices_1D);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
_sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
}} // namespace at::native
|
939be981943e5547c2dde62be5b40c993f698bda.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> h_thrust_vec(idata, idata + n);
thrust::device_vector<int> dev_thrust_vec(h_thrust_vec);
thrust::device_vector<int> dv_out(n);
timer().startGpuTimer();
            // Exclusive scan on the device via thrust; only the scan itself is
            // timed, the host/device transfers happen outside the timer.
thrust::exclusive_scan(dev_thrust_vec.begin(), dev_thrust_vec.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
| 939be981943e5547c2dde62be5b40c993f698bda.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> h_thrust_vec(idata, idata + n);
thrust::device_vector<int> dev_thrust_vec(h_thrust_vec);
thrust::device_vector<int> dv_out(n);
timer().startGpuTimer();
            // Exclusive scan on the device via thrust; only the scan itself is
            // timed, the host/device transfers happen outside the timer.
thrust::exclusive_scan(dev_thrust_vec.begin(), dev_thrust_vec.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
|
711ed3af5862a2b7bcf9efc45f83e2a8b0a71d87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch*n + filter)*size + offset] *= biases[filter];
} | 711ed3af5862a2b7bcf9efc45f83e2a8b0a71d87.cu | #include "includes.h"
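// Multiplies each output element by its filter's scale factor; assumes a 3D
// launch grid of (ceil(size / blockDim.x), n, batch) blocks.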
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch*n + filter)*size + offset] *= biases[filter];
} |
7ab0a3db4df87f4dd86cc8c9c74b0c776c6b5684.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<math.h>
#include "sobel.cuh"
extern bool InitCUDA();
#define MONITOR_TIME
#define NUM_THREADS 256
/*
int matrix_gx[mn] = {
-1, 0, 1,
-2, 0, 2,
-1, 0, 1
};
int matrix_gy[mn] = {
-1, -2, -1,
0, 0, 0,
1, 2, 1
};
*/
template<typename T>
__global__ void doConvolutionInPointCUDA(
const T* src,
T* des,
const int m,
const int n,
const int thread_count,
const T default_value
) {
const int thread_id = threadIdx.x;
const int block_id = blockIdx.x;
	int position = block_id * thread_count + thread_id;  // blockIdx/threadIdx are 0-based
int x = position % m;
int y = position / m;
if(x <= 0 || x >= m-1 || y <= 0 || y >= n-1){
return;
}
T s_x = src[(y+1)*m + (x-1)] + 2 * src[(y+1)*m + (x)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y-1)*m + (x)] + src[(y-1)*m + (x+1)]);
T s_y = src[(y-1)*m + (x+1)] + 2 * src[(y)*m + (x+1)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y)*m + (x-1)] + src[(y+1)*m + (x-1)]);
float temp = sqrtf(powf((float)s_x, 2) + powf((float)s_y, 2));
if(abs(temp)>1e-5){
des[position] = src[position];
} else {
des[position] = default_value;
}
//des[position] = temp;
}
/*
every thread calculate one row
*/
template<typename T>
__global__ void doConvolutionInLineCUDA(
const T* src,
T* des,
const int m,
const int n,
const T default_value
) {
const int thread_id = threadIdx.x;
const int block_id = blockIdx.x;
int x = 0;
int y = block_id*NUM_THREADS + thread_id;
if(y <= 0 || y >= n-1){
return;
}
	for (; x < m; x++) {
if(x <= 0 || x >= m-1){
continue;
}
T s_x = src[(y+1)*m + (x-1)] + 2 * src[(y+1)*m + (x)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y-1)*m + (x)] + src[(y-1)*m + (x+1)]);
T s_y = src[(y-1)*m + (x+1)] + 2 * src[(y)*m + (x+1)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y)*m + (x-1)] + src[(y+1)*m + (x-1)]);
float temp = sqrtf(powf((float)s_x, 2) + powf((float)s_y, 2));
if(abs(temp)>1e-5){
des[y*m+x] = src[y*m+x];
} else {
des[y*m+x] = default_value;
}
//des[y*m+x] = temp;
}
}
template<typename T>
hipError_t convolutionCUDA(
const T* src,
T* des,
const int m,
const int n,
const T default_value
) {
int array_size = m * n;
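	// Host wrapper: copy src to the device, run the one-row-per-thread Sobel
	// kernel, copy the edge-masked result back, and time the full round trip.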
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
	T *src_gpu = NULL, *des_gpu = NULL;  // init so the Error path can hipFree safely
#ifdef MONITOR_TIME
clock_t start, end;
start = clock();
#endif
cudaStatus = hipMalloc((void**) &src_gpu, sizeof(T) * array_size);
cudaStatus = hipMalloc((void**) &des_gpu, sizeof(T) * array_size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
{
//hipMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda, sizeof(float) * n, n, hipMemcpyHostToDevice);
cudaStatus = hipMemcpy(src_gpu, src, array_size*sizeof(T), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//unsigned int num_thread = (array_size -1)% NUM_THREADS + 1;
//unsigned int blocks = (array_size + num_thread - 1) / num_thread;
//doConvolutionInPointCUDA<<<blocks, num_thread>>>(src_gpu, des_gpu, m, n, num_thread);
unsigned int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
		hipLaunchKernelGGL(( doConvolutionInLineCUDA), dim3(blocks), dim3(NUM_THREADS), 0, 0, src_gpu, des_gpu, m, n, default_value);  // kernel indexes rows by block_id*NUM_THREADS + thread_id
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(des, des_gpu, array_size*sizeof(T), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//hipMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n, sizeof(float) * n, n, hipMemcpyDeviceToHost);
#ifdef MONITOR_TIME
end = clock();
float cost_time = (float)(end - start) / CLOCKS_PER_SEC;
printf("The cost time is: %f\n", cost_time);
#endif
}
Error:
hipFree(src_gpu);
hipFree(des_gpu);
return cudaStatus;
}
template<typename T>
void doConvolution(
const T* src,
T* des,
const int m,
const int n,
const T default_value
) {
for(int y=0;y<n;y++){
for(int x=0;x<m;x++){
if(y <= 0 || y >= n-1 || x <= 0 || x >= m-1){
continue;
}
T s_x = src[(y+1)*m + (x-1)] + 2 * src[(y+1)*m + (x)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y-1)*m + (x)] + src[(y-1)*m + (x+1)]);
T s_y = src[(y-1)*m + (x+1)] + 2 * src[(y)*m + (x+1)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y)*m + (x-1)] + src[(y+1)*m + (x-1)]);
T temp = sqrtf(powf(s_x, 2) + powf(s_y, 2));
if(abs(temp)>1e-5){
des[y*m+x] = src[y*m+x];
} else {
des[y*m+x] = default_value;
}
}
}
}
template<typename T>
int Get2DBorderTemplate(T* src, T* des, const int m , const int n, const T default_value){
if(m*n <= 0){
return 0;
}
if(src == NULL || des == NULL){
fprintf(stderr, "error parameters!");
return 1;
}
bool has_cuda = InitCUDA();
if(has_cuda){
hipError_t cudaStatus;
cudaStatus = convolutionCUDA(src, des, m ,n, default_value);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
//cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
} else {
doConvolution(src, des, m ,n, default_value);
}
return 0;
}
int MY_EXPORT Get2DBorder(float* src, float* des, const int m , const int n) {
return Get2DBorderTemplate(src, des, m, n, 0.f);
}
int MY_EXPORT Get2DBorder(int* src, int* des, const int m , const int n) {
return Get2DBorderTemplate(src, des, m, n, 0);
} | 7ab0a3db4df87f4dd86cc8c9c74b0c776c6b5684.cu | #include<math.h>
#include "sobel.cuh"
extern bool InitCUDA();
#define MONITOR_TIME
#define NUM_THREADS 256
/*
int matrix_gx[mn] = {
-1, 0, 1,
-2, 0, 2,
-1, 0, 1
};
int matrix_gy[mn] = {
-1, -2, -1,
0, 0, 0,
1, 2, 1
};
*/
template<typename T>
__global__ void doConvolutionInPointCUDA(
const T* src,
T* des,
const int m,
const int n,
const int thread_count,
const T default_value
) {
const int thread_id = threadIdx.x;
const int block_id = blockIdx.x;
	int position = block_id * thread_count + thread_id;  // blockIdx/threadIdx are 0-based
int x = position % m;
int y = position / m;
if(x <= 0 || x >= m-1 || y <= 0 || y >= n-1){
return;
}
T s_x = src[(y+1)*m + (x-1)] + 2 * src[(y+1)*m + (x)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y-1)*m + (x)] + src[(y-1)*m + (x+1)]);
T s_y = src[(y-1)*m + (x+1)] + 2 * src[(y)*m + (x+1)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y)*m + (x-1)] + src[(y+1)*m + (x-1)]);
float temp = sqrtf(powf((float)s_x, 2) + powf((float)s_y, 2));
if(abs(temp)>1e-5){
des[position] = src[position];
} else {
des[position] = default_value;
}
//des[position] = temp;
}
/*
every thread calculate one row
*/
template<typename T>
__global__ void doConvolutionInLineCUDA(
const T* src,
T* des,
const int m,
const int n,
const T default_value
) {
const int thread_id = threadIdx.x;
const int block_id = blockIdx.x;
int x = 0;
int y = block_id*NUM_THREADS + thread_id;
if(y <= 0 || y >= n-1){
return;
}
	for (; x < m; x++) {
if(x <= 0 || x >= m-1){
continue;
}
T s_x = src[(y+1)*m + (x-1)] + 2 * src[(y+1)*m + (x)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y-1)*m + (x)] + src[(y-1)*m + (x+1)]);
T s_y = src[(y-1)*m + (x+1)] + 2 * src[(y)*m + (x+1)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y)*m + (x-1)] + src[(y+1)*m + (x-1)]);
float temp = sqrtf(powf((float)s_x, 2) + powf((float)s_y, 2));
if(abs(temp)>1e-5){
des[y*m+x] = src[y*m+x];
} else {
des[y*m+x] = default_value;
}
//des[y*m+x] = temp;
}
}
template<typename T>
cudaError_t convolutionCUDA(
const T* src,
T* des,
const int m,
const int n,
const T default_value
) {
int array_size = m * n;
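	// Host wrapper: copy src to the device, run the one-row-per-thread Sobel
	// kernel, copy the edge-masked result back, and time the full round trip.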
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
	T *src_gpu = NULL, *des_gpu = NULL;  // init so the Error path can cudaFree safely
#ifdef MONITOR_TIME
clock_t start, end;
start = clock();
#endif
cudaStatus = cudaMalloc((void**) &src_gpu, sizeof(T) * array_size);
cudaStatus = cudaMalloc((void**) &des_gpu, sizeof(T) * array_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
{
//cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
cudaStatus = cudaMemcpy(src_gpu, src, array_size*sizeof(T), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
//unsigned int num_thread = (array_size -1)% NUM_THREADS + 1;
//unsigned int blocks = (array_size + num_thread - 1) / num_thread;
//doConvolutionInPointCUDA<<<blocks, num_thread>>>(src_gpu, des_gpu, m, n, num_thread);
unsigned int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
		doConvolutionInLineCUDA<<<blocks, NUM_THREADS>>>(src_gpu, des_gpu, m, n, default_value);  // kernel indexes rows by block_id*NUM_THREADS + thread_id
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(des, des_gpu, array_size*sizeof(T), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
//cudaMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
#ifdef MONITOR_TIME
end = clock();
float cost_time = (float)(end - start) / CLOCKS_PER_SEC;
printf("The cost time is: %f\n", cost_time);
#endif
}
Error:
cudaFree(src_gpu);
cudaFree(des_gpu);
return cudaStatus;
}
template<typename T>
void doConvolution(
const T* src,
T* des,
const int m,
const int n,
const T default_value
) {
for(int y=0;y<n;y++){
for(int x=0;x<m;x++){
if(y <= 0 || y >= n-1 || x <= 0 || x >= m-1){
continue;
}
T s_x = src[(y+1)*m + (x-1)] + 2 * src[(y+1)*m + (x)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y-1)*m + (x)] + src[(y-1)*m + (x+1)]);
T s_y = src[(y-1)*m + (x+1)] + 2 * src[(y)*m + (x+1)] + src[(y+1)*m + (x+1)] -
(src[(y-1)*m + (x-1)] + 2 * src[(y)*m + (x-1)] + src[(y+1)*m + (x-1)]);
T temp = sqrtf(powf(s_x, 2) + powf(s_y, 2));
if(abs(temp)>1e-5){
des[y*m+x] = src[y*m+x];
} else {
des[y*m+x] = default_value;
}
}
}
}
template<typename T>
int Get2DBorderTemplate(T* src, T* des, const int m , const int n, const T default_value){
if(m*n <= 0){
return 0;
}
if(src == NULL || des == NULL){
fprintf(stderr, "error parameters!");
return 1;
}
bool has_cuda = InitCUDA();
if(has_cuda){
cudaError_t cudaStatus;
cudaStatus = convolutionCUDA(src, des, m ,n, default_value);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
//cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
} else {
doConvolution(src, des, m ,n, default_value);
}
return 0;
}
int MY_EXPORT Get2DBorder(float* src, float* des, const int m , const int n) {
return Get2DBorderTemplate(src, des, m, n, 0.f);
}
int MY_EXPORT Get2DBorder(int* src, int* des, const int m , const int n) {
return Get2DBorderTemplate(src, des, m, n, 0);
} |
fdb29e29c24026d935171385c9ffd3da504078a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "CONV_cuBLAS.cuh"
using namespace std;
//From Berkeley Vision's Caffe
//Refer to Caffe's license : https://github.com/BVLC/caffe/blob/master/LICENSE
static inline bool is_a_ge_zero_and_a_lt_b(int a, int b)
{
return (unsigned int)a < (unsigned int)(b);
}
void Im2Col(float *data_im, int channels, int height, int width, int kernel_h, int kernel_w,
int pad_h, int pad_w, int stride_h, int stride_w, float *data_col)
{
int output_h = (height + 2 * pad_h - ((kernel_h - 1) + 1)) / stride_h + 1;
int output_w = (width + 2 * pad_w - ((kernel_w - 1) + 1)) / stride_w + 1;
int channel_size = height * width;
for (int channel = channels; channel--; data_im += channel_size) {
for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
int input_row = -pad_h + kernel_row;
for (int output_rows = output_h; output_rows; output_rows--)
{
if (!is_a_ge_zero_and_a_lt_b(input_row, height))
{
for (int output_cols = output_w; output_cols; output_cols--)
{
*(data_col++) = 0;
}
}
else
{
int input_col = -pad_w + kernel_col;
for (int output_col = output_w; output_col; output_col--)
{
if (is_a_ge_zero_and_a_lt_b(input_col, width))
{
*(data_col++) = data_im[input_row * width + input_col];
} else {
*(data_col++) = 0;
}
input_col += stride_w;
}
}
input_row += stride_h;
}
}
}
}
}
void convolution_cuBLAS(float *input, float *weight, float *scratchpad, TensorDim in_dim, TensorDim out_dim, TensorDim sp_dim,
TensorDim kernal_dim, int pad, int stride, float *output)
{
GpuTimer timer;
float alpha = 1.0f;
float beta = 0.0f;
timer.Start();
Im2Col(input, in_dim.c, in_dim.h, in_dim.w, kernal_dim.h, kernal_dim.w, pad, pad, stride, stride, scratchpad);
timer.Stop();
printf("Im2Col conversion:\t %f msecs.\n",timer.Elapsed());
hipblasHandle_t handle;
hipblasCreate(&handle);
int A_wdith = kernal_dim.c * kernal_dim.h * kernal_dim.w;
int A_Height = kernal_dim.n;
int B_wdith = sp_dim.w;
// allocate device memory
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, TensorSize(kernal_dim)*sizeof(float));
hipMalloc(&d_B, TensorSize(sp_dim)*sizeof(float));
hipMalloc(&d_C, TensorSize(out_dim)*sizeof(float));
timer.Start();
// copy host memory to device
hipMemcpy(d_A, weight, TensorSize(kernal_dim)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, scratchpad, TensorSize(sp_dim)*sizeof(float), hipMemcpyHostToDevice);
timer.Stop();
printf("hipMemcpyHostToDevice:\t %f msecs.\n", timer.Elapsed());
//print_matrixNCHW(weight, kernal_dim);
//print_matrixNCHW(scratchpad, sp_dim);
//hipblasOperation_t HIPBLAS_OP_N;
timer.Start();
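	// cuBLAS is column-major, so compute C^T = B^T * A^T by passing B first;
	// the result then reads back directly as the row-major [A_Height x B_wdith] output.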
hipblasStatus_t ret = hipblasSgemm( handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
B_wdith, /*Width B*/
A_Height, /*Height A*/
A_wdith, /*Width A*/
&alpha,
d_B, /*d_B*/
B_wdith, /*Width B*/
d_A, /*d_A*/
A_wdith, /*Width A*/
&beta,
d_C, /*d_C*/
B_wdith /*uiWB*/
);
timer.Stop();
printf("cublas Convolution:\t %f msecs.\n",timer.Elapsed());
// copy result from device to host
timer.Start();
hipMemcpy(output, d_C, TensorSize(out_dim)*sizeof(float), hipMemcpyDeviceToHost);
timer.Stop();
printf("hipMemcpyDeviceToHost:\t %f msecs.\n", timer.Elapsed());
	// free device buffers and destroy the cuBLAS handle before returning
	hipFree(d_A); hipFree(d_B); hipFree(d_C);
	hipblasDestroy(handle);
}
| fdb29e29c24026d935171385c9ffd3da504078a9.cu |
#include "CONV_cuBLAS.cuh"
using namespace std;
//From Berkeley Vision's Caffe
//Refer to Caffe's license : https://github.com/BVLC/caffe/blob/master/LICENSE
static inline bool is_a_ge_zero_and_a_lt_b(int a, int b)
{
return (unsigned int)a < (unsigned int)(b);
}
void Im2Col(float *data_im, int channels, int height, int width, int kernel_h, int kernel_w,
int pad_h, int pad_w, int stride_h, int stride_w, float *data_col)
{
int output_h = (height + 2 * pad_h - ((kernel_h - 1) + 1)) / stride_h + 1;
int output_w = (width + 2 * pad_w - ((kernel_w - 1) + 1)) / stride_w + 1;
int channel_size = height * width;
for (int channel = channels; channel--; data_im += channel_size) {
for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
int input_row = -pad_h + kernel_row;
for (int output_rows = output_h; output_rows; output_rows--)
{
if (!is_a_ge_zero_and_a_lt_b(input_row, height))
{
for (int output_cols = output_w; output_cols; output_cols--)
{
*(data_col++) = 0;
}
}
else
{
int input_col = -pad_w + kernel_col;
for (int output_col = output_w; output_col; output_col--)
{
if (is_a_ge_zero_and_a_lt_b(input_col, width))
{
*(data_col++) = data_im[input_row * width + input_col];
} else {
*(data_col++) = 0;
}
input_col += stride_w;
}
}
input_row += stride_h;
}
}
}
}
}
void convolution_cuBLAS(float *input, float *weight, float *scratchpad, TensorDim in_dim, TensorDim out_dim, TensorDim sp_dim,
TensorDim kernal_dim, int pad, int stride, float *output)
{
GpuTimer timer;
float alpha = 1.0f;
float beta = 0.0f;
timer.Start();
Im2Col(input, in_dim.c, in_dim.h, in_dim.w, kernal_dim.h, kernal_dim.w, pad, pad, stride, stride, scratchpad);
timer.Stop();
printf("Im2Col conversion:\t %f msecs.\n",timer.Elapsed());
cublasHandle_t handle;
cublasCreate(&handle);
int A_wdith = kernal_dim.c * kernal_dim.h * kernal_dim.w;
int A_Height = kernal_dim.n;
int B_wdith = sp_dim.w;
// allocate device memory
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, TensorSize(kernal_dim)*sizeof(float));
cudaMalloc(&d_B, TensorSize(sp_dim)*sizeof(float));
cudaMalloc(&d_C, TensorSize(out_dim)*sizeof(float));
timer.Start();
// copy host memory to device
cudaMemcpy(d_A, weight, TensorSize(kernal_dim)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, scratchpad, TensorSize(sp_dim)*sizeof(float), cudaMemcpyHostToDevice);
timer.Stop();
printf("cudaMemcpyHostToDevice:\t %f msecs.\n", timer.Elapsed());
//print_matrixNCHW(weight, kernal_dim);
//print_matrixNCHW(scratchpad, sp_dim);
//cublasOperation_t CUBLAS_OP_N;
timer.Start();
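	// cuBLAS is column-major, so compute C^T = B^T * A^T by passing B first;
	// the result then reads back directly as the row-major [A_Height x B_wdith] output.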
cublasStatus_t ret = cublasSgemm( handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
B_wdith, /*Width B*/
A_Height, /*Height A*/
A_wdith, /*Width A*/
&alpha,
d_B, /*d_B*/
B_wdith, /*Width B*/
d_A, /*d_A*/
A_wdith, /*Width A*/
&beta,
d_C, /*d_C*/
B_wdith /*uiWB*/
);
timer.Stop();
printf("cublas Convolution:\t %f msecs.\n",timer.Elapsed());
// copy result from device to host
timer.Start();
cudaMemcpy(output, d_C, TensorSize(out_dim)*sizeof(float), cudaMemcpyDeviceToHost);
timer.Stop();
printf("cudaMemcpyDeviceToHost:\t %f msecs.\n", timer.Elapsed());
}
|
bd005189c1cfeda5e7fc1dd50f881a09aaed0e84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include "gpu_convert.h"
__global__ void imp_8u_to_32f( const gpu_plm2<unsigned char> src, gpu_plm2<float> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
unsigned char c = src(ix, iy);
dst(ix, iy) = c / 255.0f;
}
__global__ void imp_8u_to_32f( const gpu_plm2<uchar4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
uchar4 c = src(ix, iy);
dst(ix, iy) = make_float4(c.x / 255.0f, c.y / 255.0f, c.z / 255.0f, c.w / 255.0f);
}
__global__ void imp_32f_to_8u( const gpu_plm2<float> src, gpu_plm2<unsigned char> dst) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float c = clamp(src(ix, iy), 0.0f, 1.0f);
dst(ix, iy) = (unsigned char)(255.0f *c);
}
__global__ void imp_32f_to_8u( const gpu_plm2<float4> src, gpu_plm2<uchar4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float4 c = clamp(src(ix, iy), 0, 1);
dst(ix, iy) = make_uchar4((int)(255.0f *c.x), (int)(255.0f *c.y), (int)(255.0f *c.z), (int)(255.0f *c.w));
}
gpu_image<float> gpu_8u_to_32f( const gpu_image<unsigned char>& src ) {
gpu_image<float> dst(src.size());
hipLaunchKernelGGL(( imp_8u_to_32f), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<float4> gpu_8u_to_32f( const gpu_image<uchar4>& src ) {
gpu_image<float4> dst(src.size());
GPU_CHECK_ERROR();
hipLaunchKernelGGL(( imp_8u_to_32f), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<unsigned char> gpu_32f_to_8u( const gpu_image<float>& src ) {
gpu_image<unsigned char> dst(src.size());
hipLaunchKernelGGL(( imp_32f_to_8u), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<uchar4> gpu_32f_to_8u( const gpu_image<float4>& src ) {
gpu_image<uchar4> dst(src.size());
hipLaunchKernelGGL(( imp_32f_to_8u), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
| bd005189c1cfeda5e7fc1dd50f881a09aaed0e84.cu | //
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include "gpu_convert.h"
__global__ void imp_8u_to_32f( const gpu_plm2<unsigned char> src, gpu_plm2<float> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
unsigned char c = src(ix, iy);
dst(ix, iy) = c / 255.0f;
}
__global__ void imp_8u_to_32f( const gpu_plm2<uchar4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
uchar4 c = src(ix, iy);
dst(ix, iy) = make_float4(c.x / 255.0f, c.y / 255.0f, c.z / 255.0f, c.w / 255.0f);
}
__global__ void imp_32f_to_8u( const gpu_plm2<float> src, gpu_plm2<unsigned char> dst) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float c = clamp(src(ix, iy), 0.0f, 1.0f);
dst(ix, iy) = (unsigned char)(255.0f *c);
}
__global__ void imp_32f_to_8u( const gpu_plm2<float4> src, gpu_plm2<uchar4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float4 c = clamp(src(ix, iy), 0, 1);
dst(ix, iy) = make_uchar4((int)(255.0f *c.x), (int)(255.0f *c.y), (int)(255.0f *c.z), (int)(255.0f *c.w));
}
gpu_image<float> gpu_8u_to_32f( const gpu_image<unsigned char>& src ) {
gpu_image<float> dst(src.size());
imp_8u_to_32f<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<float4> gpu_8u_to_32f( const gpu_image<uchar4>& src ) {
gpu_image<float4> dst(src.size());
GPU_CHECK_ERROR();
imp_8u_to_32f<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<unsigned char> gpu_32f_to_8u( const gpu_image<float>& src ) {
gpu_image<unsigned char> dst(src.size());
imp_32f_to_8u<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
gpu_image<uchar4> gpu_32f_to_8u( const gpu_image<float4>& src ) {
gpu_image<uchar4> dst(src.size());
imp_32f_to_8u<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
|
95677737bf664371ff7dfdb753f1981f78c0af6a.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 95677737bf664371ff7dfdb753f1981f78c0af6a.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
280b4a2ff7d047c26f7db753dd32200e0216a736.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tissueGPU3Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char *p; int matrix_len = strtol(argv[1], &p, 10);
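// Benchmark sweep: for each matrix size and block shape, do one checked launch
// plus 10 warm-up launches, then time 1000 kernel launches with steady_clock.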
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_tissxyz = NULL;
hipMalloc(&d_tissxyz, XSIZE*YSIZE*sizeof(float)); // allocate bytes for floats, not raw element counts
float *d_vessxyz = NULL;
hipMalloc(&d_vessxyz, XSIZE*YSIZE*sizeof(float));
float *d_pt000 = NULL;
hipMalloc(&d_pt000, XSIZE*YSIZE*sizeof(float));
float *d_qv000 = NULL;
hipMalloc(&d_qv000, XSIZE*YSIZE*sizeof(float));
int nnt = 1;
int nnv = 1;
int is2d = 1;
float req = 1;
float r2d = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( tissueGPU3Kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_tissxyz,d_vessxyz,d_pt000,d_qv000,nnt,nnv,is2d,req,r2d);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( tissueGPU3Kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_tissxyz,d_vessxyz,d_pt000,d_qv000,nnt,nnv,is2d,req,r2d);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( tissueGPU3Kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_tissxyz,d_vessxyz,d_pt000,d_qv000,nnt,nnv,is2d,req,r2d);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 280b4a2ff7d047c26f7db753dd32200e0216a736.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "tissueGPU3Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char *p; int matrix_len = strtol(argv[1], &p, 10);
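// Benchmark sweep: for each matrix size and block shape, do one checked launch
// plus 10 warm-up launches, then time 1000 kernel launches with steady_clock.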
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_tissxyz = NULL;
cudaMalloc(&d_tissxyz, XSIZE*YSIZE*sizeof(float)); // allocate bytes for floats, not raw element counts
float *d_vessxyz = NULL;
cudaMalloc(&d_vessxyz, XSIZE*YSIZE*sizeof(float));
float *d_pt000 = NULL;
cudaMalloc(&d_pt000, XSIZE*YSIZE*sizeof(float));
float *d_qv000 = NULL;
cudaMalloc(&d_qv000, XSIZE*YSIZE*sizeof(float));
int nnt = 1;
int nnv = 1;
int is2d = 1;
float req = 1;
float r2d = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
tissueGPU3Kernel<<<gridBlock,threadBlock>>>(d_tissxyz,d_vessxyz,d_pt000,d_qv000,nnt,nnv,is2d,req,r2d);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
tissueGPU3Kernel<<<gridBlock,threadBlock>>>(d_tissxyz,d_vessxyz,d_pt000,d_qv000,nnt,nnv,is2d,req,r2d);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
tissueGPU3Kernel<<<gridBlock,threadBlock>>>(d_tissxyz,d_vessxyz,d_pt000,d_qv000,nnt,nnv,is2d,req,r2d);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d16964273a6cb312c7c811eb8769b60fc6a8ea32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************
*
* main.cu, Version 1.0.0 Mon 09 Jan 2012
*
* ----------------------------------------------------------------------------
*
* HIP EGS
* Copyright (C) 2012 CancerCare Manitoba
*
* The latest version of HIP EGS and additional information are available online at
* http://www.physics.umanitoba.ca/~elbakri/cuda_egs/ and http://www.lippuner.ca/cuda_egs
*
* HIP EGS is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any later
* version.
*
* HIP EGS is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* ----------------------------------------------------------------------------
*
* Contact:
*
* Jonas Lippuner
* Email: [email protected]
*
****************************************************************************/
#define HIP_EGS
#include "EGS.h"
#include "output.c"
#include "media.c"
#include "init.hip"
#include "kernels.hip"
uint read_step_counts(ulong *this_total, ulong *grand_total) {
clock_t start = clock();
hipMemcpy(h_total_step_counts, d_total_step_counts, sizeof(total_step_counts_t), hipMemcpyDeviceToHost);
for (uchar i = 0; i < NUM_CAT; i++) {
this_total[i] = 0;
for (uchar j = 0; j < SIMULATION_NUM_BLOCKS; j++)
this_total[i] += (*h_total_step_counts)[j][i];
grand_total[i] += this_total[i];
}
clock_t stop = clock();
return stop - start;
}
int main(int argc, char **argv) {
// record start time
clock_t tic = clock();
// set RNG seed
uint seed = 1325631759;
// check whether GPU device is available
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("HIP device is not available. Exit.\n");
return 0;
}
int GPUId = 0;
hipSetDevice(GPUId);
printf(" Phantom . . . . . . . . . . %s\n", egsphant_file);
printf(" PEGS4 file . . . . . . . . %s\n", pegs_file);
ulong num_histories = 100000000;
printf(" Histories . . . . . . . . . %zu\n", num_histories);
printf(" MT parameter file . . . . . %s\n", MT_params_file);
printf(" Photon xsections . . . . . %s\n", photon_xsections);
printf(" Atomic ff file . . . . . . %s\n", atomic_ff_file);
printf(" Spectrum file . . . . . . %s\n", spec_file);
// write settings
printf("\nSettings\n");
printf(" Warps per block . . . . . . %d\n", SIMULATION_WARPS_PER_BLOCK);
printf(" Blocks per multiprocessor . %d\n", SIMULATION_BLOCKS_PER_MULTIPROC);
printf(" Iterations outer loop . . . %d\n", SIMULATION_ITERATIONS);
#ifdef USE_ENERGY_SPECTRUM
printf(" USE_ENERGY_SPECTRUM . . . . enabled\n");
#else
printf(" USE_ENERGY_SPECTRUM . . . . disabled\n");
#endif
#ifdef DO_LIST_DEPTH_COUNT
printf(" DO_LIST_DEPTH_COUNT . . . . enabled\n");
#else
printf(" DO_LIST_DEPTH_COUNT . . . . disabled\n");
#endif
// perform initialization
init(seed);
clock_t tic2 = clock();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsed;
float time_sim = 0.0F;
float time_sum = 0.0F;
uint time_copy = 0;
ulong this_total[NUM_CAT];
ulong grand_total[NUM_CAT];
for (uchar i = 0; i < NUM_CAT; i++)
grand_total[i] = 0;
bool limit_reached = grand_total[p_new_particle] >= num_histories;
ulong num_in_progress = 0;
bool init = true;
// list depth counter
#ifdef DO_LIST_DEPTH_COUNT
ulong list_depth = 0;
ulong num_it = 0;
#endif
printf("simulation running, wait for ETA...");
do {
// do simulation step
hipEventRecord(start);
hipLaunchKernelGGL(( simulation_step_kernel), dim3(dim3(SIMULATION_BLOCKS_PER_MULTIPROC, NUM_MULTIPROC)), dim3(SIMULATION_WARPS_PER_BLOCK * WARP_SIZE), 0, 0, init, limit_reached);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
time_sim += elapsed;
init = false;
// sum detector scores
hipEventRecord(start);
hipLaunchKernelGGL(( sum_detector_scores_kernel), dim3(SUM_DETECTOR_NUM_BLOCKS), dim3(SUM_DETECTOR_WARPS_PER_BLOCK * WARP_SIZE), 0, 0, );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
time_sum += elapsed;
// copy counts from device
time_copy += read_step_counts(this_total, grand_total);
ulong num_finished_histories = grand_total[p_new_particle];
limit_reached = num_finished_histories >= num_histories;
// list depth counter
#ifdef DO_LIST_DEPTH_COUNT
hipMemcpy(h_total_list_depth, d_total_list_depth, sizeof(total_list_depth_t), hipMemcpyDeviceToHost);
hipMemcpy(h_total_num_inner_iterations, d_total_num_inner_iterations, sizeof(total_num_inner_iterations_t), hipMemcpyDeviceToHost);
for (uchar i = 0; i < SIMULATION_NUM_BLOCKS; i++) {
list_depth += (*h_total_list_depth)[i];
num_it += (*h_total_num_inner_iterations)[i];
}
#endif
// count number of particles in progress
num_in_progress = 0;
num_in_progress += this_total[p_cutoff_discard];
num_in_progress += this_total[p_user_discard];
num_in_progress += this_total[p_photon_step];
num_in_progress += this_total[p_rayleigh];
num_in_progress += this_total[p_compton];
num_in_progress += this_total[p_photo];
num_in_progress += this_total[p_pair];
num_in_progress += this_total[p_new_particle];
// calculate ETA and display progress
clock_t tac = clock();
elapsed = (float)(tac - tic) / (float)CLOCKS_PER_SEC;
float complete = (float)num_finished_histories / (float)num_histories;
float eta = elapsed / complete - elapsed;
if (eta < 0.0F)
eta = 0.0F;
printf("\r%zu (%.2f%%) histories started, elapsed time: %.0f, ETA: %.0f ",
num_finished_histories, 100.0F * complete, elapsed, eta);
} while (num_in_progress > 0);
printf("\r");
printf("\nSimulation step counts\n");
printf(" Cutoff discard . . . . . . %zu\n", grand_total[p_cutoff_discard]);
printf(" User discard . . . . . . . %zu\n", grand_total[p_user_discard]);
printf(" Photon step . . . . . . . . %zu\n", grand_total[p_photon_step]);
printf(" Rayleigh . . . . . . . . . %zu\n", grand_total[p_rayleigh]);
printf(" Compton . . . . . . . . . . %zu\n", grand_total[p_compton]);
printf(" Photo . . . . . . . . . . . %zu\n", grand_total[p_photo]);
printf(" Pair . . . . . . . . . . . %zu\n", grand_total[p_pair]);
printf(" New particles . . . . . . . %zu\n", grand_total[p_new_particle]);
// list depth counter
#ifdef DO_LIST_DEPTH_COUNT
printf("\nDivergence\n");
printf(" Total different steps . . . %zu\n", list_depth);
printf(" Total iterations . . . . . %zu\n", num_it);
printf(" Average different steps . . %f\n", (double)list_depth / (double)num_it);
printf(" Average active threads . . %.2f %% (at least)\n", 100.0 * (double)num_it / (double)list_depth);
#endif
hipEventDestroy(start);
hipEventDestroy(stop);
// copy results
clock_t copy_tic = clock();
uint size_det = h_detector.N.x * h_detector.N.y * sizeof(double);
double *detector_results_count[NUM_DETECTOR_CAT + 1];
double *detector_results_energy[NUM_DETECTOR_CAT + 1];
detector_results_count[NUM_DETECTOR_CAT] = (double*)malloc(size_det);
detector_results_energy[NUM_DETECTOR_CAT] = (double*)malloc(size_det);
for (uchar i = 0; i < NUM_DETECTOR_CAT; i++) {
detector_results_count[i] = (double*)malloc(size_det);
detector_results_energy[i] = (double*)malloc(size_det);
// copy detector totals from device
hipMemcpy(detector_results_count[i], d_detector_totals_count[i], size_det, hipMemcpyDeviceToHost);
hipMemcpy(detector_results_energy[i], d_detector_totals_energy[i], size_det, hipMemcpyDeviceToHost);
}
total_weights_t h_total_weights;
hipMemcpy(h_total_weights, d_total_weights, sizeof(total_weights_t), hipMemcpyDeviceToHost);
clock_t copy_tac = clock();
time_copy += (copy_tac - copy_tic);
double total_weight = 0.0F;
for (uchar i = 0; i < SIMULATION_NUM_BLOCKS; i++)
total_weight += h_total_weights[i];
double average_weight = total_weight / (double)grand_total[p_new_particle];
for (uint i = 0; i < h_detector.N.x * h_detector.N.y; i++) {
double total_count = 0.0F;
double total_energy = 0.0F;
for (uchar j = 0; j < NUM_DETECTOR_CAT; j++) {
detector_results_count[j][i] /= average_weight;
detector_results_energy[j][i] /= average_weight;
total_count += detector_results_count[j][i];
total_energy += detector_results_energy[j][i];
}
detector_results_count[NUM_DETECTOR_CAT][i] = total_count;
detector_results_energy[NUM_DETECTOR_CAT][i] = total_energy;
}
write_output("./", "count", detector_results_count);
write_output("./", "energy", detector_results_energy);
for (uchar i = 0; i <= NUM_DETECTOR_CAT; i++) {
free(detector_results_count[i]);
free(detector_results_energy[i]);
}
free_all();
clock_t tac = clock();
float time_copy_f = (float)time_copy / (float)CLOCKS_PER_SEC * 1000.0F;
float total_time = (float)(tac - tic) / (float)CLOCKS_PER_SEC * 1000.0F;
float init_time = (float)(tic2 - tic) / (float)CLOCKS_PER_SEC * 1000.0F;
float total_cpu_time = (float)(tac - tic2) / (float)CLOCKS_PER_SEC * 1000.0F;
float other = total_time - time_copy_f - time_sim - time_sum;
printf("\nTiming statistics\n");
printf(" Elapsed time . . . . . . . %.2f ms (%.2f %%)\n", total_time, 100.0);
printf(" Total CPU/GPU . . . . . . . %.2f ms (%.2f %%)\n", total_cpu_time, 100.0F * total_cpu_time / total_time);
printf(" Simulation kernel . . . . . %.2f ms (%.2f %%)\n", time_sim, 100.0F * time_sim / total_time);
printf(" Summing kernel . . . . . . %.2f ms (%.2f %%)\n", time_sum, 100.0F * time_sum / total_time);
printf(" Copying . . . . . . . . . . %.2f ms (%.2f %%)\n", time_copy_f, 100.0F * time_copy_f / total_time);
printf(" Other . . . . . . . . . . . %.2f ms (%.2f %%)\n", other, 100.0F * other / total_time);
printf(" Initialization. . . . . . . %.2f ms (%.2f %%)\n", init_time, 100.0F * init_time / total_time);
printf("\nHistories per ms: %f\n", (float)grand_total[p_new_particle] / total_time);
return 0;
}
| d16964273a6cb312c7c811eb8769b60fc6a8ea32.cu | /****************************************************************************
*
* main.cu, Version 1.0.0 Mon 09 Jan 2012
*
* ----------------------------------------------------------------------------
*
 * CUDA EGS
 * Copyright (C) 2012 CancerCare Manitoba
 *
 * The latest version of CUDA EGS and additional information are available online at
 * http://www.physics.umanitoba.ca/~elbakri/cuda_egs/ and http://www.lippuner.ca/cuda_egs
 *
 * CUDA EGS is free software; you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * CUDA EGS is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* ----------------------------------------------------------------------------
*
* Contact:
*
* Jonas Lippuner
* Email: [email protected]
*
****************************************************************************/
#define CUDA_EGS
#include "EGS.h"
#include "output.c"
#include "media.c"
#include "init.cu"
#include "kernels.cu"
uint read_step_counts(ulong *this_total, ulong *grand_total) {
clock_t start = clock();
    cudaMemcpy(h_total_step_counts, d_total_step_counts, sizeof(total_step_counts_t), cudaMemcpyDeviceToHost);
for (uchar i = 0; i < NUM_CAT; i++) {
this_total[i] = 0;
for (uchar j = 0; j < SIMULATION_NUM_BLOCKS; j++)
this_total[i] += (*h_total_step_counts)[j][i];
grand_total[i] += this_total[i];
}
clock_t stop = clock();
return stop - start;
}
int main(int argc, char **argv) {
// record start time
clock_t tic = clock();
// set RNG seed
uint seed = 1325631759;
// check whether GPU device is available
int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        printf("CUDA device is not available. Exit.\n");
        return 0;
    }
    int GPUId = 0;
    cudaSetDevice(GPUId);
printf(" Phantom . . . . . . . . . . %s\n", egsphant_file);
printf(" PEGS4 file . . . . . . . . %s\n", pegs_file);
ulong num_histories = 100000000;
printf(" Histories . . . . . . . . . %zu\n", num_histories);
printf(" MT parameter file . . . . . %s\n", MT_params_file);
printf(" Photon xsections . . . . . %s\n", photon_xsections);
printf(" Atomic ff file . . . . . . %s\n", atomic_ff_file);
printf(" Spectrum file . . . . . . %s\n", spec_file);
// write settings
printf("\nSettings\n");
printf(" Warps per block . . . . . . %d\n", SIMULATION_WARPS_PER_BLOCK);
printf(" Blocks per multiprocessor . %d\n", SIMULATION_BLOCKS_PER_MULTIPROC);
printf(" Iterations outer loop . . . %d\n", SIMULATION_ITERATIONS);
#ifdef USE_ENERGY_SPECTRUM
printf(" USE_ENERGY_SPECTRUM . . . . enabled\n");
#else
printf(" USE_ENERGY_SPECTRUM . . . . disabled\n");
#endif
#ifdef DO_LIST_DEPTH_COUNT
printf(" DO_LIST_DEPTH_COUNT . . . . enabled\n");
#else
printf(" DO_LIST_DEPTH_COUNT . . . . disabled\n");
#endif
// perform initialization
init(seed);
clock_t tic2 = clock();
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
float elapsed;
float time_sim = 0.0F;
float time_sum = 0.0F;
uint time_copy = 0;
ulong this_total[NUM_CAT];
ulong grand_total[NUM_CAT];
for (uchar i = 0; i < NUM_CAT; i++)
grand_total[i] = 0;
bool limit_reached = grand_total[p_new_particle] >= num_histories;
ulong num_in_progress = 0;
bool init = true;
// list depth counter
#ifdef DO_LIST_DEPTH_COUNT
ulong list_depth = 0;
ulong num_it = 0;
#endif
printf("simulation running, wait for ETA...");
do {
// do simulation step
        cudaEventRecord(start);
        simulation_step_kernel<<<dim3(SIMULATION_BLOCKS_PER_MULTIPROC, NUM_MULTIPROC), SIMULATION_WARPS_PER_BLOCK * WARP_SIZE>>>(init, limit_reached);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
time_sim += elapsed;
init = false;
// sum detector scores
        cudaEventRecord(start);
        sum_detector_scores_kernel<<<SUM_DETECTOR_NUM_BLOCKS, SUM_DETECTOR_WARPS_PER_BLOCK * WARP_SIZE>>>();
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
time_sum += elapsed;
// copy counts from device
time_copy += read_step_counts(this_total, grand_total);
ulong num_finished_histories = grand_total[p_new_particle];
limit_reached = num_finished_histories >= num_histories;
// list depth counter
#ifdef DO_LIST_DEPTH_COUNT
        cudaMemcpy(h_total_list_depth, d_total_list_depth, sizeof(total_list_depth_t), cudaMemcpyDeviceToHost);
        cudaMemcpy(h_total_num_inner_iterations, d_total_num_inner_iterations, sizeof(total_num_inner_iterations_t), cudaMemcpyDeviceToHost);
for (uchar i = 0; i < SIMULATION_NUM_BLOCKS; i++) {
list_depth += (*h_total_list_depth)[i];
num_it += (*h_total_num_inner_iterations)[i];
}
#endif
// count number of particles in progress
num_in_progress = 0;
num_in_progress += this_total[p_cutoff_discard];
num_in_progress += this_total[p_user_discard];
num_in_progress += this_total[p_photon_step];
num_in_progress += this_total[p_rayleigh];
num_in_progress += this_total[p_compton];
num_in_progress += this_total[p_photo];
num_in_progress += this_total[p_pair];
num_in_progress += this_total[p_new_particle];
// calculate ETA and display progress
clock_t tac = clock();
elapsed = (float)(tac - tic) / (float)CLOCKS_PER_SEC;
float complete = (float)num_finished_histories / (float)num_histories;
float eta = elapsed / complete - elapsed;
if (eta < 0.0F)
eta = 0.0F;
printf("\r%zu (%.2f%%) histories started, elapsed time: %.0f, ETA: %.0f ",
num_finished_histories, 100.0F * complete, elapsed, eta);
} while (num_in_progress > 0);
printf("\r");
printf("\nSimulation step counts\n");
printf(" Cutoff discard . . . . . . %zu\n", grand_total[p_cutoff_discard]);
printf(" User discard . . . . . . . %zu\n", grand_total[p_user_discard]);
printf(" Photon step . . . . . . . . %zu\n", grand_total[p_photon_step]);
printf(" Rayleigh . . . . . . . . . %zu\n", grand_total[p_rayleigh]);
printf(" Compton . . . . . . . . . . %zu\n", grand_total[p_compton]);
printf(" Photo . . . . . . . . . . . %zu\n", grand_total[p_photo]);
printf(" Pair . . . . . . . . . . . %zu\n", grand_total[p_pair]);
printf(" New particles . . . . . . . %zu\n", grand_total[p_new_particle]);
// list depth counter
#ifdef DO_LIST_DEPTH_COUNT
printf("\nDivergence\n");
printf(" Total different steps . . . %zu\n", list_depth);
printf(" Total iterations . . . . . %zu\n", num_it);
printf(" Average different steps . . %f\n", (double)list_depth / (double)num_it);
printf(" Average active threads . . %.2f %% (at least)\n", 100.0 * (double)num_it / (double)list_depth);
#endif
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
// copy results
clock_t copy_tic = clock();
uint size_det = h_detector.N.x * h_detector.N.y * sizeof(double);
double *detector_results_count[NUM_DETECTOR_CAT + 1];
double *detector_results_energy[NUM_DETECTOR_CAT + 1];
detector_results_count[NUM_DETECTOR_CAT] = (double*)malloc(size_det);
detector_results_energy[NUM_DETECTOR_CAT] = (double*)malloc(size_det);
for (uchar i = 0; i < NUM_DETECTOR_CAT; i++) {
detector_results_count[i] = (double*)malloc(size_det);
detector_results_energy[i] = (double*)malloc(size_det);
// copy detector totals from device
        cudaMemcpy(detector_results_count[i], d_detector_totals_count[i], size_det, cudaMemcpyDeviceToHost);
        cudaMemcpy(detector_results_energy[i], d_detector_totals_energy[i], size_det, cudaMemcpyDeviceToHost);
}
total_weights_t h_total_weights;
    cudaMemcpy(h_total_weights, d_total_weights, sizeof(total_weights_t), cudaMemcpyDeviceToHost);
clock_t copy_tac = clock();
time_copy += (copy_tac - copy_tic);
double total_weight = 0.0F;
for (uchar i = 0; i < SIMULATION_NUM_BLOCKS; i++)
total_weight += h_total_weights[i];
double average_weight = total_weight / (double)grand_total[p_new_particle];
for (uint i = 0; i < h_detector.N.x * h_detector.N.y; i++) {
double total_count = 0.0F;
double total_energy = 0.0F;
for (uchar j = 0; j < NUM_DETECTOR_CAT; j++) {
detector_results_count[j][i] /= average_weight;
detector_results_energy[j][i] /= average_weight;
total_count += detector_results_count[j][i];
total_energy += detector_results_energy[j][i];
}
detector_results_count[NUM_DETECTOR_CAT][i] = total_count;
detector_results_energy[NUM_DETECTOR_CAT][i] = total_energy;
}
write_output("./", "count", detector_results_count);
write_output("./", "energy", detector_results_energy);
for (uchar i = 0; i <= NUM_DETECTOR_CAT; i++) {
free(detector_results_count[i]);
free(detector_results_energy[i]);
}
free_all();
clock_t tac = clock();
float time_copy_f = (float)time_copy / (float)CLOCKS_PER_SEC * 1000.0F;
float total_time = (float)(tac - tic) / (float)CLOCKS_PER_SEC * 1000.0F;
float init_time = (float)(tic2 - tic) / (float)CLOCKS_PER_SEC * 1000.0F;
float total_cpu_time = (float)(tac - tic2) / (float)CLOCKS_PER_SEC * 1000.0F;
float other = total_time - time_copy_f - time_sim - time_sum;
printf("\nTiming statistics\n");
printf(" Elapsed time . . . . . . . %.2f ms (%.2f %%)\n", total_time, 100.0);
printf(" Total CPU/GPU . . . . . . . %.2f ms (%.2f %%)\n", total_cpu_time, 100.0F * total_cpu_time / total_time);
printf(" Simulation kernel . . . . . %.2f ms (%.2f %%)\n", time_sim, 100.0F * time_sim / total_time);
printf(" Summing kernel . . . . . . %.2f ms (%.2f %%)\n", time_sum, 100.0F * time_sum / total_time);
printf(" Copying . . . . . . . . . . %.2f ms (%.2f %%)\n", time_copy_f, 100.0F * time_copy_f / total_time);
printf(" Other . . . . . . . . . . . %.2f ms (%.2f %%)\n", other, 100.0F * other / total_time);
printf(" Initialization. . . . . . . %.2f ms (%.2f %%)\n", init_time, 100.0F * init_time / total_time);
printf("\nHistories per ms: %f\n", (float)grand_total[p_new_particle] / total_time);
return 0;
}
|
c01bc5b31e95723911c760681941713f80c29d3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*Copyright (c) 2018 Data Intensive Applications and Systems Laboratory (DIAS)
Ecole Polytechnique Federale de Lausanne
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.*/
#include <cassert>
#include <iostream>
#include <numa.h>
#include <unistd.h>
#include "join-primitives.cuh"
__global__ void init_payload (int* R, int n) {
for (int i = threadIdx.x + blockIdx.x*blockDim.x; i < n; i += blockDim.x*gridDim.x)
R[i] = i;
}
/*
S= keys of data to be partitioned
P= payloads of data to be partitioned
heads= keeps information on first bucket per partition and number of elements in it, packed into one 64-bit integer (only used here)
chains= the successor of a bucket in the bucket list
out_cnts= number of elements per partition
buckets_used= how many buckets are reserved by the partitioning already
offsets= describe the segments that occur due to partitioning
note: multithreaded partitioning creates partitions that consist of contiguous segments
=> iterate over these segments to avoid handling empty slots
output_S= bucketized partitions of data keys
output_P= bucketized partitions of data payloads
cnt= number of elements to partition on total
log_parts- log of number of partitions
first_bit= shift the keys before "hashing"
num_threads= number of threads used in CPU side, used together with offsets
preconditions:
heads: current bucket (1 << 18) [special value for no bucket] and -1 elements (first write allocates bucket)
out_cnts: 0
buckets_used= number of partitions (first num_parts buckets are reserved)
*/
__global__ void partition_pass_one (
const int32_t * __restrict__ S,
const int32_t * __restrict__ P,
const size_t * __restrict__ offsets,
uint64_t * __restrict__ heads,
uint32_t * __restrict__ buckets_used,
uint32_t * __restrict__ chains,
uint32_t * __restrict__ out_cnts,
int32_t * __restrict__ output_S,
int32_t * __restrict__ output_P,
size_t cnt,
uint32_t log_parts,
uint32_t first_bit,
uint32_t num_threads) {
assert((((size_t) bucket_size) + ((size_t) blockDim.x) * gridDim.x) < (((size_t) 1) << 32));
const uint32_t parts = 1 << log_parts;
const int32_t parts_mask = parts - 1;
uint32_t * router = (uint32_t *) int_shared;
size_t* shared_offsets = (size_t*) (int_shared + 1024*4 + 4*parts);
    /*if the caller provides segment info, copy it; otherwise use a single segment that covers all the data*/
if (offsets != NULL) {
for (int i = threadIdx.x; i < 4*num_threads; i += blockDim.x) {
shared_offsets[i] = offsets[i];
}
} else {
for (int i = threadIdx.x; i < 4*num_threads; i += blockDim.x) {
if (i == 1)
shared_offsets[i] = cnt;
else
shared_offsets[i] = 0;
}
}
shared_offsets[4*num_threads] = cnt+4096;
shared_offsets[4*num_threads+1] = cnt+4096;
/*partition element counter starts at 0*/
for (size_t j = threadIdx.x ; j < parts ; j += blockDim.x )
router[1024*4 + parts + j] = 0;
if (threadIdx.x == 0)
router[0] = 0;
__syncthreads();
/*iterate over the segments*/
for (int u = 0; u < 2*num_threads; u++) {
size_t segment_start = shared_offsets[2*u];
size_t segment_limit = shared_offsets[2*u + 1];
size_t segment_end = segment_start + ((segment_limit - segment_start + 4096 - 1)/4096)*4096;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x) + segment_start; i < segment_end ; i += 4 * blockDim.x * gridDim.x) {
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(S + i));
uint32_t thread_keys[4];
/*compute local histogram for a chunk of 4*blockDim.x elements*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (i + k < segment_limit){
uint32_t partition = (hasht(thread_vals.i[k]) >> first_bit) & parts_mask;
atomicAdd(router + (1024 * 4 + parts + partition), 1);
thread_keys[k] = partition;
} else {
thread_keys[k] = 0;
}
}
__syncthreads();
for (size_t j = threadIdx.x; j < parts ; j += blockDim.x ) {
uint32_t cnt = router[1024 * 4 + parts + j];
if (cnt > 0){
atomicAdd(out_cnts + j, cnt);
uint32_t pcnt ;
uint32_t bucket ;
uint32_t next_buck;
bool repeat = true;
while (__any(repeat)){
if (repeat){
/*check if any of the output bucket is filling up*/
uint64_t old_heads = atomicAdd(heads + j, ((uint64_t) cnt) << 32);
atomicMin(heads + j, ((uint64_t) (2*bucket_size)) << 32);
pcnt = ((uint32_t) (old_heads >> 32));
bucket = (uint32_t) old_heads ;
                        /*now there are two cases:
                          1) old_heads.cnt <= bucket_size => we have a valid write position
                          2) old_heads.cnt >  bucket_size => the bucket is locked => retry*/
if (pcnt < bucket_size){
/* 1) old_heads.cnt <= bucket_size*/
/*check if the bucket was filled*/
if (pcnt + cnt >= bucket_size){
if (bucket < (1 << 18)) {
next_buck = atomicAdd(buckets_used, 1);
chains[bucket] = next_buck;
} else {
next_buck = j;
}
uint64_t tmp = next_buck + (((uint64_t) (pcnt + cnt - bucket_size)) << 32);
atomicExch(heads + j, tmp);
} else {
next_buck = bucket;
}
repeat = false;
}
}
}
router[1024 * 4 + j] = atomicAdd(router, cnt);
                router[1024 * 4 + parts + j] = 0;
router[1024 * 4 + 2 * parts + j] = (bucket << log2_bucket_size) + pcnt;
router[1024 * 4 + 3 * parts + j] = next_buck << log2_bucket_size ;
}
}
__syncthreads();
uint32_t total_cnt = router[0];
__syncthreads();
/*calculate write positions for block-wise shuffle => atomicAdd on start of partition*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (i + k < segment_limit)
thread_keys[k] = atomicAdd(router + (1024 * 4 + thread_keys[k]), 1);
}
/*write the keys in shared memory*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k)
if (i + k < segment_limit)
router[thread_keys[k]] = thread_vals.i[k];
__syncthreads();
int32_t thread_parts[4];
/*read shuffled keys and write them to output partitions "somewhat" coalesced*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (threadIdx.x + 1024 * k < total_cnt) {
int32_t val = router[threadIdx.x + 1024 * k];
uint32_t partition = (hasht(val) >> first_bit) & parts_mask;
uint32_t cnt = router[1024 * 4 + partition] - (threadIdx.x + 1024 * k);
uint32_t bucket = router[1024 * 4 + 2 * parts + partition];
if (((bucket + cnt) ^ bucket) & ~bucket_size_mask){
uint32_t next_buck = router[1024 * 4 + 3 * parts + partition];
cnt = ((bucket + cnt) & bucket_size_mask);
bucket = next_buck;
}
bucket += cnt;
output_S[bucket] = val;
thread_parts[k] = partition;
}
}
__syncthreads();
/*read payloads of original data*/
thread_vals = *(reinterpret_cast<const vec4 *>(P + i));
/*shuffle payloads in shared memory, in the same offsets that we used for their corresponding keys*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k)
if (i + k < segment_limit) {
router[thread_keys[k]] = thread_vals.i[k];
}
__syncthreads();
/*write payloads to partition buckets in "somewhat coalesced manner"*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (threadIdx.x + 1024 * k < total_cnt) {
int32_t val = router[threadIdx.x + 1024 * k];
int32_t partition = thread_parts[k];
uint32_t cnt = router[1024 * 4 + partition] - (threadIdx.x + 1024 * k);
uint32_t bucket = router[1024 * 4 + 2 * parts + partition];
if (((bucket + cnt) ^ bucket) & ~bucket_size_mask){
uint32_t next_buck = router[1024 * 4 + 3 * parts + partition];
cnt = ((bucket + cnt) & bucket_size_mask);
bucket = next_buck;
}
bucket += cnt;
output_P[bucket] = val;
}
}
if (threadIdx.x == 0) router[0] = 0;
}
}
}
/*
compute information for the second partitioning pass
input:
chains=points to the successor in the bucket list for each bucket (hint: we append new buckets to the end)
out_cnts=count of elements per partition
output:
chains=packed value of element count in bucket and the partition the bucket belongs to
*/
__global__ void compute_bucket_info (uint32_t* chains, uint32_t* out_cnts, uint32_t log_parts) {
uint32_t parts = 1 << log_parts;
for (int p = threadIdx.x + blockIdx.x*blockDim.x; p < parts; p += gridDim.x*blockDim.x) {
uint32_t cur = p;
int32_t cnt = out_cnts[p];
while (cnt > 0) {
uint32_t local_cnt = (cnt >= 4096)? 4096 : cnt;
uint32_t val = (p << 13) + local_cnt;
uint32_t next = chains[cur];
chains[cur] = val;
cur = next;
cnt -= 4096;
}
}
}
/*
S= keys of data to be re-partitioned
P= payloads of data to be re-partitioned
heads= keeps information on first bucket per partition and number of elements in it, packed into one 64-bit integer (only used here)
chains= the successor of a bucket in the bucket list
out_cnts= number of elements per partition
buckets_used= how many buckets are reserved by the partitioning already
offsets= describe the segments that occur due to partitioning
note: multithreaded partitioning creates partitions that consist of contiguous segments
=> iterate over these segments to avoid handling empty slots
output_S= bucketized partitions of data keys (results)
output_P= bucketized partitions of data payloads (results)
S_log_parts- log of number of partitions for previous pass
log_parts- log of number of partitions for this pass
first_bit= shift the keys before "hashing"
bucket_num_ptr: number of input buckets
preconditions:
heads: current bucket (1 << 18) [special value for no bucket] and -1 elements (first write allocates bucket)
out_cnts: 0
buckets_used= number of partitions (first num_parts buckets are reserved)
*/
__global__ void partition_pass_two (
const int32_t * __restrict__ S,
const int32_t * __restrict__ P,
const uint32_t * __restrict__ bucket_info,
uint32_t * __restrict__ buckets_used,
uint64_t * heads,
uint32_t * __restrict__ chains,
uint32_t * __restrict__ out_cnts,
int32_t * __restrict__ output_S,
int32_t * __restrict__ output_P,
uint32_t S_log_parts,
uint32_t log_parts,
uint32_t first_bit,
uint32_t * bucket_num_ptr) {
assert((((size_t) bucket_size) + ((size_t) blockDim.x) * gridDim.x) < (((size_t) 1) << 32));
const uint32_t S_parts = 1 << S_log_parts;
const uint32_t parts = 1 << log_parts;
const int32_t parts_mask = parts - 1;
uint32_t buckets_num = *bucket_num_ptr;
uint32_t * router = (uint32_t *) int_shared; //[1024*4 + parts];
for (size_t j = threadIdx.x ; j < parts ; j += blockDim.x )
router[1024*4 + parts + j] = 0;
if (threadIdx.x == 0)
router[0] = 0;
__syncthreads();
/*each CUDA block processes a bucket at a time*/
for (size_t i = blockIdx.x; i < buckets_num; i += gridDim.x) {
uint32_t info = bucket_info[i];
/*number of elements per bucket*/
uint32_t cnt = info & ((1 << 13) - 1);
/*id of original partition*/
uint32_t pid = info >> 13;
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(S + bucket_size * i + 4*threadIdx.x));
uint32_t thread_keys[4];
/*compute local histogram for the bucket*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (4*threadIdx.x + k < cnt){
uint32_t partition = (hasht(thread_vals.i[k]) >> first_bit) & parts_mask;
atomicAdd(router + (1024 * 4 + parts + partition), 1);
thread_keys[k] = partition;
} else {
thread_keys[k] = 0;
}
}
__syncthreads();
for (size_t j = threadIdx.x; j < parts ; j += blockDim.x ) {
uint32_t cnt = router[1024 * 4 + parts + j];
if (cnt > 0){
atomicAdd(out_cnts + (pid << log_parts) + j, cnt);
uint32_t pcnt ;
uint32_t bucket ;
uint32_t next_buck;
bool repeat = true;
while (__any(repeat)){
if (repeat){
uint64_t old_heads = atomicAdd(heads + (pid << log_parts) + j, ((uint64_t) cnt) << 32);
atomicMin(heads + (pid << log_parts) + j, ((uint64_t) (2*bucket_size)) << 32);
pcnt = ((uint32_t) (old_heads >> 32));
bucket = (uint32_t) old_heads ;
if (pcnt < bucket_size){
if (pcnt + cnt >= bucket_size){
if (bucket < (1 << 18)) {
next_buck = atomicAdd(buckets_used, 1);
chains[bucket] = next_buck;
} else {
next_buck = (pid << log_parts) + j;
}
uint64_t tmp = next_buck + (((uint64_t) (pcnt + cnt - bucket_size)) << 32);
atomicExch(heads + (pid << log_parts) + j, tmp);
} else {
next_buck = bucket;
}
repeat = false;
}
}
}
router[1024 * 4 + j] = atomicAdd(router, cnt);
router[1024 * 4 + parts + j] = 0;
router[1024 * 4 + 2 * parts + j] = (bucket << log2_bucket_size) + pcnt;
router[1024 * 4 + 3 * parts + j] = next_buck << log2_bucket_size ;
}
}
__syncthreads();
uint32_t total_cnt = router[0];
__syncthreads();
/*calculate write positions for block-wise shuffle => atomicAdd on start of partition*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (4*threadIdx.x + k < cnt)
thread_keys[k] = atomicAdd(router + (1024 * 4 + thread_keys[k]), 1);
}
/*write the keys in shared memory*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k)
if (4*threadIdx.x + k < cnt)
router[thread_keys[k]] = thread_vals.i[k];
__syncthreads();
int32_t thread_parts[4];
/*read shuffled keys and write them to output partitions "somewhat" coalesced*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (threadIdx.x + 1024 * k < total_cnt) {
int32_t val = router[threadIdx.x + 1024 * k];
uint32_t partition = (hasht(val) >> first_bit) & parts_mask;
uint32_t cnt = router[1024 * 4 + partition] - (threadIdx.x + 1024 * k);
uint32_t bucket = router[1024 * 4 + 2 * parts + partition];
if (((bucket + cnt) ^ bucket) & ~bucket_size_mask){
uint32_t next_buck = router[1024 * 4 + 3 * parts + partition];
cnt = ((bucket + cnt) & bucket_size_mask);
bucket = next_buck;
}
bucket += cnt;
output_S[bucket] = val;
thread_parts[k] = partition;
}
}
__syncthreads();
/*read payloads of original data*/
thread_vals = *(reinterpret_cast<const vec4 *>(P + i*bucket_size + 4*threadIdx.x));
/*shuffle payloads in shared memory, in the same offsets that we used for their corresponding keys*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k)
if (4*threadIdx.x + k < cnt) {
router[thread_keys[k]] = thread_vals.i[k];
}
__syncthreads();
/*write payloads to partition buckets in "somewhat coalesced manner"*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (threadIdx.x + 1024 * k < total_cnt) {
int32_t val = router[threadIdx.x + 1024 * k];
int32_t partition = thread_parts[k];
uint32_t cnt = router[1024 * 4 + partition] - (threadIdx.x + 1024 * k);
uint32_t bucket = router[1024 * 4 + 2 * parts + partition];
if (((bucket + cnt) ^ bucket) & ~bucket_size_mask){
uint32_t next_buck = router[1024 * 4 + 3 * parts + partition];
cnt = ((bucket + cnt) & bucket_size_mask);
bucket = next_buck;
}
bucket += cnt;
output_P[bucket] = val;
}
}
if (threadIdx.x == 0) router[0] = 0;
}
}
#define LOCAL_BUCKETS_BITS 10
#define LOCAL_BUCKETS ((1 << LOCAL_BUCKETS_BITS))
#define MAX_BIT 32
/*count trailing zeros of x (software fallback)*/
__device__ int ctzd (int x) {
    if (x == 0)
        return 32;
    int n = 0;
    if ((x & 0x0000FFFF) == 0) {
        n += 16;
        x >>= 16;
    }
    if ((x & 0x000000FF) == 0) {
        n += 8;
        x >>= 8;
    }
    if ((x & 0x0000000F) == 0) {
        n += 4;
        x >>= 4;
    }
    if ((x & 0x00000003) == 0) {
        n += 2;
        x >>= 2;
    }
    if ((x & 0x00000001) == 0) {
        n += 1;
    }
    return n;
}
__global__ void init_metadata_double (
uint64_t * __restrict__ heads1,
uint32_t * __restrict__ buckets_used1,
uint32_t * __restrict__ chains1,
uint32_t * __restrict__ out_cnts1,
uint32_t parts1,
uint32_t buckets_num1,
uint64_t * __restrict__ heads2,
uint32_t * __restrict__ buckets_used2,
uint32_t * __restrict__ chains2,
uint32_t * __restrict__ out_cnts2,
uint32_t parts2,
uint32_t buckets_num2
) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < buckets_num1; i += blockDim.x*gridDim.x)
chains1[i] = 0;
for (int i = tid; i < parts1; i += blockDim.x*gridDim.x)
out_cnts1[i] = 0;
for (int i = tid; i < parts1; i += blockDim.x*gridDim.x)
heads1[i] = (1 << 18) + (((uint64_t) bucket_size_mask) << 32);
if (tid == 0) {
*buckets_used1 = parts1;
}
for (int i = tid; i < buckets_num2; i += blockDim.x*gridDim.x)
chains2[i] = 0;
for (int i = tid; i < parts2; i += blockDim.x*gridDim.x)
out_cnts2[i] = 0;
for (int i = tid; i < parts2; i += blockDim.x*gridDim.x)
heads2[i] = (1 << 18) + (((uint64_t) bucket_size_mask) << 32);
if (tid == 0) {
*buckets_used2 = parts2;
}
}
/*
Building phase for the non-partitioned hash join with perfect hashing; the code exploits this property (we never follow chains), which makes it the best case for the non-partitioned join
data=array of the keys
payload=array of payloads
n=number of tuples
lookup=lookup table/hashtable that we build => we store the payload at position lookup[key]
*/
__global__ void build_perfect_array (int32_t* data, int32_t* payload, int n, int32_t* lookup) {
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int32_t val = thread_vals.i[k];
int32_t payload = thread_payloads.i[k];
            lookup[val] = payload + 1; /*store payload+1 so that 0 marks an empty slot*/
}
}
}
/*Probing phase for non-partitioned hash join with perfect hashing
data=keys for probe side
payload=payloads for probe side
n=number of elements
lookup=hashtable
aggr=the memory location in which we aggregate with atomics at the end*/
__global__ void probe_perfect_array (int32_t* data, int32_t* payload, int n, int32_t* lookup, int* aggr) {
int count = 0;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int payload = thread_payloads.i[k];
int res = lookup[val];
if (res)
count += (payload * (res - 1));
}
}
atomicAdd(aggr, count);
}
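/*hedged usage sketch, not part of the original source: a minimal host-side driver for the
  perfect-hashing pair above. it assumes keys are dense in [0, max_key] so lookup can be
  indexed directly by key, that both relation sizes are multiples of 4 (vec4 loads), and
  that the 64x1024 launch shape is an illustrative choice*/
void run_perfect_join_sketch (int32_t* d_Rkey, int32_t* d_Rpay, int nR,
                              int32_t* d_Skey, int32_t* d_Spay, int nS, int max_key) {
    int32_t* d_lookup;
    int* d_aggr;
    hipMalloc(&d_lookup, (max_key + 1) * sizeof(int32_t));
    hipMalloc(&d_aggr, sizeof(int));
    hipMemset(d_lookup, 0, (max_key + 1) * sizeof(int32_t)); /*0 marks an empty slot*/
    hipMemset(d_aggr, 0, sizeof(int));
    /*build stores payload+1 at lookup[key]; probe multiplies the payloads of matches*/
    hipLaunchKernelGGL(build_perfect_array, dim3(64), dim3(1024), 0, 0,
                       d_Skey, d_Spay, nS, d_lookup);
    hipLaunchKernelGGL(probe_perfect_array, dim3(64), dim3(1024), 0, 0,
                       d_Rkey, d_Rpay, nR, d_lookup, d_aggr);
    int result = 0;
    hipMemcpy(&result, d_aggr, sizeof(int), hipMemcpyDeviceToHost);
    std::cout << "perfect-hash join aggregate: " << result << std::endl;
    hipFree(d_lookup);
    hipFree(d_aggr);
}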
/*
Building phase for non-partitioned hash join with chaining
data=array of the keys
payload=array of payloads
n=number of tuples
log_parts=log size of hashtable/chains
output=the chains [the rest of the array stays in place]
head=the first element of each chain
*/
__global__ void build_ht_chains (int32_t* data, int n, uint32_t log_parts, int32_t* output, int* head) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int hval = val & parts_mask;
int last = atomicExch(head + hval, i+k+1);
output[i + k] = last;
}
}
}
/*
Probing phase for non-partitioned hash join with chaining
data=array of the keys
payload=array of payloads
n=number of tuples
log_parts=log size of hashtable/chains
ht=the chains that show the successor for each build element
head=the first element of each chain
ht_key=the keys of the hashtable as an array
ht_pay=the payloads of the hashtable as an array
aggr=the memory location in which we aggregate with atomics at the end
*/
__global__ void chains_probing (int32_t* data, int32_t* payload, int n, uint32_t log_parts, int32_t* ht, int32_t* ht_key, int32_t* ht_pay, int* head, int* aggr) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
int count = 0;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int payload = thread_payloads.i[k];
int hval = val & parts_mask;
int next = head[hval];
while (next != 0) {
int ht_val = ht_key[next-1];
if (ht_val == val)
count += (payload * ht_pay[next-1]);
next = ht[next-1];
}
}
}
atomicAdd(aggr, count);
}
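/*hedged usage sketch, not part of the original source: a minimal host-side driver for the
  chaining pair above. head must start zeroed (0 means empty chain; entries are stored as
  index+1); relation sizes are assumed to be multiples of 4 and the launch shape is an
  illustrative choice*/
void run_chains_join_sketch (int32_t* d_Rkey, int32_t* d_Rpay, int nR,
                             int32_t* d_Skey, int32_t* d_Spay, int nS, uint32_t log_parts) {
    int32_t* d_next;
    int* d_head;
    int* d_aggr;
    hipMalloc(&d_next, nS * sizeof(int32_t));
    hipMalloc(&d_head, (1 << log_parts) * sizeof(int));
    hipMalloc(&d_aggr, sizeof(int));
    hipMemset(d_head, 0, (1 << log_parts) * sizeof(int));
    hipMemset(d_aggr, 0, sizeof(int));
    hipLaunchKernelGGL(build_ht_chains, dim3(64), dim3(1024), 0, 0,
                       d_Skey, nS, log_parts, d_next, d_head);
    hipLaunchKernelGGL(chains_probing, dim3(64), dim3(1024), 0, 0,
                       d_Rkey, d_Rpay, nR, log_parts, d_next, d_Skey, d_Spay, d_head, d_aggr);
    int result = 0;
    hipMemcpy(&result, d_aggr, sizeof(int), hipMemcpyDeviceToHost);
    std::cout << "chaining join aggregate: " << result << std::endl;
    hipFree(d_next);
    hipFree(d_head);
    hipFree(d_aggr);
}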
/*functions for linear probing
FIXME: there is a known bug, so this path is not operational yet [it was not in the paper, so fixing it is not urgent]
*/
__global__ void ht_hist (int* data, int n, int log_parts, int* hist) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int hval = val & parts_mask;
int off = atomicAdd(hist + hval, 1);
}
}
}
__global__ void ht_offsets (int log_parts, int* hist, int* offset, int* aggr) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < parts; i += blockDim.x * gridDim.x) {
int cur = hist[i];
int off = atomicAdd(aggr, cur);
hist[i] = off;
offset[i] = off;
}
}
__global__ void build_ht_linear (int* data, int* payload, size_t n, int log_parts, int* offset, int* ht, int* htp) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int hval = val & parts_mask;
int off = atomicAdd(offset + hval, 1);
ht[off] = val;
htp[off] = thread_payloads.i[k];
}
}
}
__global__ void linear_probing (int* data, int* payload, int* ht, int* htp, int* offset_s, int* offset_e, size_t n, int log_parts, int* aggr) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
int count = 0;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
for (int j = 0; j < 32; j++) {
int probe = __shfl(val, j);
int pay = __shfl(thread_payloads.i[k], j);
int hval = probe & parts_mask;
int start = offset_s[hval];
int end = offset_e[hval];
for (int p = start + threadIdx.x % 32; p < end; p += 32) {
if (ht[p] == probe) {
count += pay*htp[p];
}
}
}
}
}
atomicAdd(aggr, count);
}
/*break "long" bucket chains to smaller chains
this helps load balancing because we can allocate work at sub-chain granularity
and effectively solve the skew problem
bucket_info=we store the packed (partition, element count) value for each bucket
chains=successor in partition's bucket list
out_cnts=count of elements in this partition
log_parts= log of number of partitions
threshold=the maximum number of elements per subchain*/
__global__ void decompose_chains (uint32_t* bucket_info, uint32_t* chains, uint32_t* out_cnts, uint32_t log_parts, int threshold) {
uint32_t parts = 1 << log_parts;
for (int p = threadIdx.x + blockIdx.x*blockDim.x; p < parts; p += gridDim.x*blockDim.x) {
uint32_t cur = p;
int32_t cnt = out_cnts[p];
uint32_t first_cnt = (cnt >= threshold)? threshold : cnt;
int32_t cutoff = 0;
while (cnt > 0) {
cutoff += bucket_size;
cnt -= bucket_size;
uint32_t next = chains[cur];
if (cutoff >= threshold && cnt > 0) {
uint32_t local_cnt = (cnt >= threshold)? threshold : cnt;
bucket_info[next] = (p << 15) + local_cnt;
chains[cur] = 0;
cutoff = 0;
} else if (next != 0) {
bucket_info[next] = 0;
}
cur = next;
}
bucket_info[p] = (p << 15) + first_cnt;
}
}
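/*hedged helper sketch, not part of the original source: the packed bucket_info layout
  produced by decompose_chains is (partition << 15) | count, matching the unpacking in the
  join kernels below; note that compute_bucket_info uses a 13-bit count field instead,
  matching the unpacking in partition_pass_two*/
__host__ __device__ __forceinline__ uint32_t pack_bucket_info (uint32_t partition, uint32_t count) {
    /*count must fit in 15 bits, i.e. stay at or below the decomposition threshold*/
    return (partition << 15) + count;
}
__host__ __device__ __forceinline__ uint32_t bucket_info_partition (uint32_t info) {
    return info >> 15;
}
__host__ __device__ __forceinline__ uint32_t bucket_info_count (uint32_t info) {
    return info & ((1 << 15) - 1);
}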
/*kernel for performing the join between the partitioned relations
R,Pr= bucketized keys and payloads for relation R (probe side)
S,Ps= bucketized keys and payloads for relation S (build side)
bucket_info=the info that tells us which partition each bucket belongs to, the number of elements (or whether it belongs to a chain)
S_cnts, S_chain= for build-side we don't pack the info since we operate under the assumption that it is usually one bucket per partition (we don't load balance)
buckets_num=number of buckets for R
results=the memory address where we aggregate
*/
__global__ void join_partitioned_aggregate (
const int32_t* R,
const int32_t* Pr,
const uint32_t* R_chain,
const uint32_t* bucket_info,
const int32_t* S,
const int32_t* Ps,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
uint32_t* buckets_num,
int32_t* results) {
    /*in order to save space, we discard the partitioning bits; then we can try fitting the keys in int16_t [HACK]*/
__shared__ int16_t elem[4096 + 512];
__shared__ int32_t payload[4096 + 512];
__shared__ int16_t next[4096 + 512];
__shared__ int32_t head[LOCAL_BUCKETS];
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
int lid = tid % 32;
int gnum = blockDim.x/32;
int count = 0;
int buckets_cnt = *buckets_num;
for (uint32_t bucket_r = block; bucket_r < buckets_cnt; bucket_r += pwidth) {
int info = bucket_info[bucket_r];
if (info != 0) {
/*unpack information on the subchain*/
int p = info >> 15;
int len_R = info & ((1 << 15) - 1);
int len_S = S_cnts[p];
/*S partition doesn't fit in shared memory*/
if (len_S > 4096+512) {
int bucket_r_loop = bucket_r;
/*now we will build a bucket of R side in the shared memory at a time and then probe it with S-side
sensible because
1) we have guarantees on size of R from the chain decomposition
2) this is a skewed scenario so size of S can be arbitrary*/
for (int offset_r = 0; offset_r < len_R; offset_r += bucket_size) {
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
__syncthreads();
/*build a hashtable from an R bucket*/
for (int base_r = 0; base_r < bucket_size; base_r += 4*blockDim.x) {
vec4 data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * bucket_r_loop + base_r + 4*threadIdx.x));
vec4 data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * bucket_r_loop + base_r + 4*threadIdx.x));
int l_cnt_R = len_R - offset_r - base_r - 4 * threadIdx.x;
int cnt = 0;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_R) {
int val = data_R.i[k];
elem[base_r + k*blockDim.x + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[base_r + k*blockDim.x + tid] = data_Pr.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], base_r + k*blockDim.x + tid);
next[base_r + k*blockDim.x + tid] = last;
}
}
}
bucket_r_loop = R_chain[bucket_r_loop];
__syncthreads();
int bucket_s_loop = p;
int base_s = 0;
/*probe hashtable from an S bucket*/
for (int offset_s = 0; offset_s < len_S; offset_s += 4*blockDim.x) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * bucket_s_loop + base_s + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * bucket_s_loop + base_s + 4*threadIdx.x));
int l_cnt_S = len_S - offset_s - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_S.i[k];
int32_t pval = data_Ps.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_S) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
count += pval*payload[pos];
}
pos = next[pos];
}
}
}
base_s += 4*blockDim.x;
if (base_s >= bucket_size) {
bucket_s_loop = S_chain[bucket_s_loop];
base_s = 0;
}
}
__syncthreads();
}
} else {
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
int off;
int it;
int base = 0;
it = p;
off = 0;
/*build hashtable for S-side*/
for (off = 0; off < len_S;) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * it + base + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * it + base +4*threadIdx.x));
int l_cnt_S = len_S - off - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
                        off += (off < bucket_size)? blockDim.x : rem_s;
                        base += blockDim.x;
                    }
if (base >= bucket_size) {
it = S_chain[it];
base = 0;
}
}
__syncthreads();
it = bucket_r;
off = 0;
/*probe from R-side*/
for (; 0 < len_R; off += 4*blockDim.x, len_R -= 4*blockDim.x) {
vec4 data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * it + off + 4*threadIdx.x));
vec4 data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * it + off + 4*threadIdx.x));
int l_cnt_R = len_R - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
/*hack to fit more data in shared memory*/
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_R) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
count += pval*payload[pos];
}
pos = next[pos];
}
}
}
if (off >= bucket_size) {
it = R_chain[it];
off = 0;
}
}
__syncthreads();
}
}
}
atomicAdd(results, count);
__syncthreads();
}
/*maximum size of the output; we always write at write_offset MOD (FOLD+1).
we use it to simulate cases where the output size explodes: we perform the actual writes, then wrap around and overwrite them*/
#define FOLD ((1 << 24) - 1)
/*the number of elements that can be stored in a warp-level buffer during the join materialization*/
#define SHUFFLE_SIZE 16
/*practically the same as join_partitioned_aggregate
i add extra comments for the materialization technique*/
__global__ void join_partitioned_results (
const int32_t* R,
const int32_t* Pr,
const uint32_t* R_chain,
const uint32_t* bucket_info,
const int32_t* S,
const int32_t* Ps,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
uint32_t* buckets_num,
int32_t* results,
int32_t* output) {
__shared__ int16_t elem[4096 + 512];
__shared__ int32_t payload[4096 + 512];
__shared__ int16_t next[4096 + 512];
__shared__ int32_t head[LOCAL_BUCKETS];
__shared__ int32_t shuffle[2*SHUFFLE_SIZE*32];
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
int lid = tid % 32;
int gid = tid / 32;
int gnum = blockDim.x/32;
int count = 0;
int ptr;
int threadmask = (lid < 31)? ~((1 << (lid+1)) - 1) : 0;
int shuffle_ptr = 0;
int32_t* warp_shuffle = shuffle + gid * 2 * SHUFFLE_SIZE;
int buckets_cnt = *buckets_num;
for (uint32_t bucket_r = block; bucket_r < buckets_cnt; bucket_r += pwidth) {
int info = bucket_info[bucket_r];
if (info != 0) {
int p = info >> 15;
int len_R = info & ((1 << 15) - 1);
int len_S = S_cnts[p];
if (len_S > 4096+512) {
int bucket_r_loop = bucket_r;
for (int offset_r = 0; offset_r < len_R; offset_r += bucket_size) {
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
__syncthreads();
for (int base_r = 0; base_r < bucket_size; base_r += 4*blockDim.x) {
vec4 data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * bucket_r_loop + base_r + 4*threadIdx.x));
vec4 data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * bucket_r_loop + base_r + 4*threadIdx.x));
int l_cnt_R = len_R - offset_r - base_r - 4 * threadIdx.x;
int cnt = 0;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_R) {
int val = data_R.i[k];
elem[base_r + k*blockDim.x + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[base_r + k*blockDim.x + tid] = data_Pr.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], base_r + k*blockDim.x + tid);
next[base_r + k*blockDim.x + tid] = last;
}
}
}
bucket_r_loop = R_chain[bucket_r_loop];
__syncthreads();
int bucket_s_loop = p;
int base_s = 0;
for (int offset_s = 0; offset_s < len_S; offset_s += 4*blockDim.x) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * bucket_s_loop + base_s + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * bucket_s_loop + base_s + 4*threadIdx.x));
int l_cnt_S = len_S - offset_s - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_S.i[k];
int32_t pval = data_Ps.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t pay;
int32_t pos = (k < l_cnt_S)? head[hval] : -1;
                            /*check at warp level whether anyone is still following a chain => this way we can shuffle without risk*/
int pred = (pos >= 0);
while (__any(pred)) {
int wr_intention = 0;
/*we have a match, fetch the data to be written*/
if (pred) {
if (elem[pos] == tval) {
pay = payload[pos];
wr_intention = 1;
count++;
}
pos = next[pos];
pred = (pos >= 0);
}
/*find out who had a match in this execution step*/
int mask = __ballot(wr_intention);
/*our software managed buffer will overflow, flush it*/
int wr_offset = shuffle_ptr + __popc(mask & threadmask);
shuffle_ptr = shuffle_ptr + __popc(mask);
                                /*while it overflows, keep flushing:
                                we flush 16 keys and then the 16 corresponding payloads consecutively, of course other formats might be friendlier*/
while (shuffle_ptr >= SHUFFLE_SIZE) {
if (wr_intention && (wr_offset < SHUFFLE_SIZE)) {
warp_shuffle[wr_offset] = pay;
warp_shuffle[wr_offset+SHUFFLE_SIZE] = pval;
wr_intention = 0;
}
if (lid == 0) {
ptr = atomicAdd(results, 2*SHUFFLE_SIZE);
ptr = ptr & FOLD;
}
ptr = __shfl(ptr, 0);
output[ptr + lid] = warp_shuffle[lid];
wr_offset -= SHUFFLE_SIZE;
shuffle_ptr -= SHUFFLE_SIZE;
}
                                /*now they fit, write them to the buffer*/
if (wr_intention && (wr_offset >= 0)) {
warp_shuffle[wr_offset] = pay;
warp_shuffle[wr_offset+SHUFFLE_SIZE] = pval;
wr_intention = 0;
}
}
}
base_s += 4*blockDim.x;
if (base_s >= bucket_size) {
bucket_s_loop = S_chain[bucket_s_loop];
base_s = 0;
}
}
__syncthreads();
}
} else {
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
int off;
int it;
int base = 0;
it = p;
off = 0;
for (off = 0; off < len_S;) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * it + base + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * it + base +4*threadIdx.x));
int l_cnt_S = len_S - off - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
off += (off < bucket_size)? blockDim.x : rem_s;
base += blockDim.x;
}
if (base >= bucket_size) {
it = S_chain[it];
base = 0;
}
}
__syncthreads();
it = bucket_r;
off = 0;
for (; 0 < len_R; off += 4*blockDim.x, len_R -= 4*blockDim.x) {
int l_cnt_R = len_R - 4 * threadIdx.x;
vec4 data_R;
vec4 data_Pr;
data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * it + off + 4*threadIdx.x));
data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * it + off + 4*threadIdx.x));
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t pay;
int32_t pos = (k < l_cnt_R)? head[hval] : -1;
/*same as previous code block*/
int pred = (pos >= 0);
while (__any(pred)) {
int wr_intention = 0;
if (pred) {
if (elem[pos] == tval) {
pay = payload[pos];
wr_intention = 1;
count++;
}
pos = next[pos];
pred = (pos >= 0);
}
int mask = __ballot(wr_intention);
int wr_offset = shuffle_ptr + __popc(mask & threadmask);
shuffle_ptr = shuffle_ptr + __popc(mask);
while (shuffle_ptr >= SHUFFLE_SIZE) {
if (wr_intention && (wr_offset < SHUFFLE_SIZE)) {
warp_shuffle[wr_offset] = pval;
warp_shuffle[wr_offset+SHUFFLE_SIZE] = pay;
wr_intention = 0;
}
if (lid == 0) {
ptr = atomicAdd(results, 2*SHUFFLE_SIZE);
ptr = ptr & FOLD;
}
ptr = __shfl(ptr, 0);
output[ptr + lid] = warp_shuffle[lid];
wr_offset -= SHUFFLE_SIZE;
shuffle_ptr -= SHUFFLE_SIZE;
}
if (wr_intention && (wr_offset >= 0)) {
warp_shuffle[wr_offset] = pval;
warp_shuffle[wr_offset+SHUFFLE_SIZE] = pay;
wr_intention = 0;
}
}
}
if (off >= bucket_size) {
it = R_chain[it];
off = 0;
}
}
__syncthreads();
}
}
}
if (lid == 0) {
ptr = atomicAdd(results, 2*shuffle_ptr);
ptr = ptr & FOLD;
}
ptr = __shfl(ptr, 0);
if (lid < shuffle_ptr) {
output[ptr + lid] = warp_shuffle[lid];
output[ptr + lid + shuffle_ptr] = warp_shuffle[lid + SHUFFLE_SIZE];
}
__syncthreads();
}
/*again the same, but the payload is the virtual tuple id and we materialize late from the Dx arrays, which store the actual columns we need
also, here we have no overflows, because if we did, the data/extra columns wouldn't fit either :) */
__global__ void join_partitioned_varpayload (
const int32_t* R,
const int32_t* Pr,
const int32_t* Dr,
const uint32_t* R_chain,
const uint32_t* bucket_info,
const int32_t* S,
const int32_t* Ps,
const int32_t* Ds,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
int32_t col_num1,
int32_t col_num2,
int32_t rel_size,
uint32_t* buckets_num,
int32_t* results) {
__shared__ int16_t elem[4096 + 512];
__shared__ int32_t payload[4096 + 512];
__shared__ int16_t next[4096 + 512];
__shared__ int32_t head[LOCAL_BUCKETS];
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
int lid = tid % 32;
int gnum = blockDim.x/32;
int count = 0;
int buckets_cnt = *buckets_num;
for (uint32_t bucket_r = block; bucket_r < buckets_cnt; bucket_r += pwidth) {
int info = bucket_info[bucket_r];
if (info != 0) {
int p = info >> 15;
int len_R = info & ((1 << 15) - 1);
int len_S = S_cnts[p];
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
int off;
int it;
int base = 0;
it = p;
off = 0;
for (off = 0; off < len_S;) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * it + base + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * it + base +4*threadIdx.x));
int l_cnt_S = len_S - off - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
                    off += (off < bucket_size)? blockDim.x : rem_s;
                    base += blockDim.x;
                }
if (base >= bucket_size) {
it = S_chain[it];
base = 0;
}
}
__syncthreads();
it = bucket_r;
off = 0;
for (; 0 < len_R; off += 4*blockDim.x, len_R -= 4*blockDim.x) {
vec4 data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * it + off + 4*threadIdx.x));
vec4 data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * it + off + 4*threadIdx.x));
int l_cnt_R = len_R - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_R) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
int32_t bval = payload[pos];
for (int z = 0; z < col_num1; z++)
count += Dr[pval + z*rel_size];
for (int z = 0; z < col_num2; z++)
count += Ds[bval + z*rel_size];
}
pos = next[pos];
}
}
}
if (off >= bucket_size) {
it = R_chain[it];
off = 0;
}
}
__syncthreads();
}
}
atomicAdd(results, count);
__syncthreads();
}
/*late materialization and perfect hashing*/
__global__ void probe_perfect_array_varpay (int32_t* data, int32_t* Dr, int n, int32_t* lookup, int32_t* Ds, int col_num1, int col_num2, int rel_size, int* aggr) {
int count = 0;
for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < n ; i += blockDim.x * gridDim.x) {
int val = data[i];
int payload = i;
int res = lookup[val];
if (res > 0) {
res--;
for (int z = 0; z < col_num1; z++)
count += Dr[payload + z*rel_size];
for (int z = 0; z < col_num2; z++)
count += Ds[res + z*rel_size];
}
}
atomicAdd(aggr, count);
}
/*partition and compute metadata for relation with key+payload*/
void prepare_Relation_payload (int* R, int* R_temp, int* P, int* P_temp, size_t RelsNum, uint32_t buckets_num, uint64_t* heads[2], uint32_t* cnts[2], uint32_t* chains[2], uint32_t* buckets_used[2], uint32_t log_parts1, uint32_t log_parts2, uint32_t first_bit, hipStream_t streams, size_t* offsets_GPU, uint32_t num_threads) {
hipLaunchKernelGGL(( init_metadata_double), dim3(64), dim3(1024), 0, streams,
heads[0], buckets_used[0], chains[0], cnts[0], 1 << log_parts1, buckets_num,
heads[1], buckets_used[1], chains[1], cnts[1], 1 << (log_parts1 + log_parts2), buckets_num
);
    hipLaunchKernelGGL(( partition_pass_one) , dim3(64), dim3(1024), (1024*4 + 4*(1 << log_parts1)) * sizeof(int32_t) + (4*num_threads+2)*sizeof(size_t), streams,
R, P,
offsets_GPU,
heads[0],
buckets_used[0],
chains[0],
cnts[0],
R_temp, P_temp,
RelsNum,
log_parts1,
first_bit + log_parts2,
num_threads
);
hipLaunchKernelGGL(( compute_bucket_info) , dim3(64), dim3(1024), 0, streams, chains[0], cnts[0], log_parts1);
    hipLaunchKernelGGL(( partition_pass_two) , dim3(64), dim3(1024), (1024*4 + 4*(1 << log_parts2)) * sizeof(int32_t) + ((2 * (1 << log_parts2) + 1)* sizeof(int32_t)), streams,
R_temp, P_temp,
chains[0],
buckets_used[1], heads[1], chains[1], cnts[1],
R, P,
log_parts1, log_parts2, first_bit,
buckets_used[0]);
}
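/*hedged end-to-end sketch, not part of the original source: partition both relations with
  prepare_Relation_payload, break the R-side chains for load balancing, then run the
  partitioned aggregate join. buffer sizes, buckets_num, the decompose threshold and the
  single-segment CPU input (offsets=NULL, num_threads=1) are illustrative choices*/
void run_partitioned_join_sketch (int* R, int* R_temp, int* Pr, int* Pr_temp, size_t nR,
                                  int* S, int* S_temp, int* Ps, int* Ps_temp, size_t nS,
                                  uint32_t log_parts1, uint32_t log_parts2, uint32_t first_bit,
                                  uint32_t buckets_num, hipStream_t stream, int32_t* results) {
    uint64_t* heads_R[2]; uint32_t *cnts_R[2], *chains_R[2], *used_R[2];
    uint64_t* heads_S[2]; uint32_t *cnts_S[2], *chains_S[2], *used_S[2];
    uint32_t parts = 1 << (log_parts1 + log_parts2);
    for (int i = 0; i < 2; i++) {
        hipMalloc(&heads_R[i], parts * sizeof(uint64_t));
        hipMalloc(&cnts_R[i], parts * sizeof(uint32_t));
        hipMalloc(&chains_R[i], buckets_num * sizeof(uint32_t));
        hipMalloc(&used_R[i], sizeof(uint32_t));
        hipMalloc(&heads_S[i], parts * sizeof(uint64_t));
        hipMalloc(&cnts_S[i], parts * sizeof(uint32_t));
        hipMalloc(&chains_S[i], buckets_num * sizeof(uint32_t));
        hipMalloc(&used_S[i], sizeof(uint32_t));
    }
    uint32_t* bucket_info;
    hipMalloc(&bucket_info, buckets_num * sizeof(uint32_t));
    hipMemset(results, 0, sizeof(int32_t));
    prepare_Relation_payload(R, R_temp, Pr, Pr_temp, nR, buckets_num, heads_R, cnts_R,
                             chains_R, used_R, log_parts1, log_parts2, first_bit, stream, NULL, 1);
    prepare_Relation_payload(S, S_temp, Ps, Ps_temp, nS, buckets_num, heads_S, cnts_S,
                             chains_S, used_S, log_parts1, log_parts2, first_bit, stream, NULL, 1);
    /*cap R subchains at 2*bucket_size elements; the threshold just has to fit in 15 bits*/
    hipLaunchKernelGGL(decompose_chains, dim3(64), dim3(1024), 0, stream,
                       bucket_info, chains_R[1], cnts_R[1], log_parts1 + log_parts2, 2*bucket_size);
    hipLaunchKernelGGL(join_partitioned_aggregate, dim3(64), dim3(1024), 0, stream,
                       R, Pr, chains_R[1], bucket_info, S, Ps, cnts_S[1], chains_S[1],
                       log_parts1 + log_parts2, used_R[1], results);
}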
/*partition and compute metadata for relation with key+payload. We use different buffers at the end (it makes sense for UVA based techniques)*/
void prepare_Relation_payload_triple (int* R, int* R_temp, int* R_final, int* P, int* P_temp, int* P_final, size_t RelsNum, uint32_t buckets_num, uint64_t* heads[2], uint32_t* cnts[2], uint32_t* chains[2], uint32_t* buckets_used[2], uint32_t log_parts1, uint32_t log_parts2, uint32_t first_bit, hipStream_t streams, size_t* offsets_GPU, uint32_t num_threads) {
hipLaunchKernelGGL(( init_metadata_double), dim3(64), dim3(1024), 0, streams,
heads[0], buckets_used[0], chains[0], cnts[0], 1 << log_parts1, buckets_num,
heads[1], buckets_used[1], chains[1], cnts[1], 1 << (log_parts1 + log_parts2), buckets_num
);
    hipLaunchKernelGGL(( partition_pass_one) , dim3(64), dim3(1024), (1024*4 + 4*(1 << log_parts1)) * sizeof(int32_t) + (4*num_threads+2)*sizeof(size_t), streams,
R, P,
offsets_GPU,
heads[0],
buckets_used[0],
chains[0],
cnts[0],
R_temp, P_temp,
RelsNum,
log_parts1,
first_bit + log_parts2,
num_threads
);
CHK_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( compute_bucket_info) , dim3(64), dim3(1024), 0, streams, chains[0], cnts[0], log_parts1);
    hipLaunchKernelGGL(( partition_pass_two) , dim3(64), dim3(1024), (1024*4 + 4*(1 << log_parts2)) * sizeof(int32_t) + ((2 * (1 << log_parts2) + 1)* sizeof(int32_t)), streams,
R_temp, P_temp,
chains[0],
buckets_used[1], heads[1], chains[1], cnts[1],
R_final, P_final,
log_parts1, log_parts2, first_bit,
buckets_used[0]);
}
template <typename Tv>
struct chain_iterator_ref_generic{
Tv x ;
int cnt;
};
template <typename T, typename Tv>
class chain_iterator_generic{
private:
const T * __restrict__ S_parts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t cnt ;
const T * __restrict__ ptr ;
uint32_t current_bucket ;
uint32_t next_bucket ;
uint32_t i ;
public:
__device__ __forceinline__ chain_iterator_generic(
const T * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t current_partition):
S_parts(S_parts + (16/sizeof(T)) * threadIdx.x), S_chains(S_chains),
cnt((S_cnts[current_partition]/((16/sizeof(T)) * blockDim.x))*(16/sizeof(T)) + max(((int32_t) (S_cnts[current_partition] % ((16/sizeof(T)) * blockDim.x))) - ((int32_t) ((16/sizeof(T)) * threadIdx.x)), 0)),
ptr(S_parts + ((size_t) current_partition << log2_bucket_size) + (16/sizeof(T)) * threadIdx.x),
current_bucket(current_partition),
next_bucket(S_chains[current_partition]),
i(0){}
__device__ __forceinline__ chain_iterator_generic(
const uint32_t * __restrict__ S_cnts,
uint32_t current_partition):
cnt(0),
i(((S_cnts[current_partition] + (16/sizeof(T)) * blockDim.x - 1)/((16/sizeof(T)) * blockDim.x))*(16/sizeof(T))){}
__device__ __forceinline__ chain_iterator_generic<T, Tv>& operator++(){
        i += (16/sizeof(T));
        ptr += (16/sizeof(T)) * blockDim.x;
        if ((i * blockDim.x) & bucket_size_mask) return *this;
        current_bucket = next_bucket;
ptr = S_parts + (current_bucket << log2_bucket_size);
next_bucket = S_chains[next_bucket];
return *this;
}
__device__ __forceinline__ chain_iterator_ref_generic<Tv> operator*() const {
chain_iterator_ref_generic<Tv> tmp;
tmp.x = *reinterpret_cast<const Tv *>(ptr);
tmp.cnt = cnt - i;
return tmp;
}
__device__ __forceinline__ bool operator!=(const chain_iterator_generic<T, Tv>& o){
return i != o.i;
}
};
template <typename T, typename Tv>
class chain_generic{
private:
const T * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t partition;
public:
__device__ __host__ __forceinline__ chain_generic(
const T * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t partition):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains), partition(partition){}
__device__ __forceinline__ chain_iterator_generic<T, Tv> begin() const {
return chain_iterator_generic<T, Tv>(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ chain_iterator_generic<T, Tv> end() const {
return chain_iterator_generic<T, Tv>(S_cnts, partition);
}
};
template <typename T, typename Tv>
class chains_generic {
private:
const T * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
public:
__device__ __host__ __forceinline__ chains_generic(
const T * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains){}
__device__ __host__ __forceinline__ chain_generic<T, Tv> get_chain(uint32_t partition) const{
return chain_generic<T, Tv>(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ uint32_t get_chain_size(uint32_t partition) const{
return S_cnts[partition];
}
};
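/*hedged usage sketch, not part of the original source: a kernel that walks one partition
  chain with the generic iterator above, summing the (assumed non-negative) keys it visits;
  every thread of the block must iterate, since the iterator hands each thread its own
  vec4 lanes*/
__global__ void sum_chain_keys_sketch (const int32_t* __restrict__ S_parts,
                                       const uint32_t* __restrict__ S_cnts,
                                       const uint32_t* __restrict__ S_chains,
                                       uint32_t partition,
                                       unsigned long long* out) {
    chains_generic<int32_t, vec4> chains(S_parts, S_cnts, S_chains);
    unsigned long long local = 0;
    for (auto ref : chains.get_chain(partition)) {
        #pragma unroll
        for (int k = 0; k < 4; k++)
            if (k < ref.cnt) /*cnt counts the elements this thread still owns*/
                local += (unsigned long long) ref.x.i[k];
    }
    atomicAdd(out, local);
}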
struct chain_iterator_ref{
vec4 x ;
int cnt;
};
struct chain_iterator_i_ref{
int32_t x;
bool v;
};
class chain_iterator{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t cnt ;
const int32_t * __restrict__ ptr ;
uint32_t current_bucket ;
uint32_t next_bucket ;
uint32_t i ;
public:
__device__ __forceinline__ chain_iterator(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t current_partition):
S_parts(S_parts + 4 * threadIdx.x), S_chains(S_chains), cnt((S_cnts[current_partition]/(4 * blockDim.x))*4 + max(((int32_t) (S_cnts[current_partition] % (4 * blockDim.x))) - ((int32_t) (4 * threadIdx.x)), 0)), ptr(S_parts + ((size_t) current_partition << log2_bucket_size) + 4 * threadIdx.x), current_bucket(current_partition), next_bucket(S_chains[current_partition]), i(0){}
// __device__ __forceinline__ chain_iterator(
// const uint32_t * __restrict__ S_cnts):
// cnt(0), i(((S_cnts[blockIdx.x] + 4 * blockDim.x - 1)/(4 * blockDim.x)) * 4 * blockDim.x){}
__device__ __forceinline__ chain_iterator(
const uint32_t * __restrict__ S_cnts,
uint32_t current_partition):
cnt(0), i(((S_cnts[current_partition] + 4 * blockDim.x - 1)/(4 * blockDim.x))*4){}
__device__ __forceinline__ chain_iterator& operator++(){
i += 4;// * blockDim.x;
ptr += 4 * blockDim.x;
if ((i * blockDim.x) & bucket_size_mask) return *this;
current_bucket = next_bucket;//int_shared[0];
ptr = S_parts + (current_bucket << log2_bucket_size);
next_bucket = S_chains[next_bucket];
return *this;
}
__device__ __forceinline__ chain_iterator_ref operator*() const {
chain_iterator_ref tmp;
tmp.x = *reinterpret_cast<const vec4 *>(ptr);
tmp.cnt = cnt - i;
return tmp;
}
__device__ __forceinline__ bool operator!=(const chain_iterator& o){
return i != o.i;
}
};
class chain_iterator_i{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t cnt ;
const int32_t * __restrict__ ptr ;
uint32_t current_bucket ;
uint32_t next_bucket ;
uint32_t i ;
public:
// __device__ __forceinline__ chain_iterator_i(
// const int32_t * __restrict__ S_parts ,
// const uint32_t * __restrict__ S_cnts ,
// const uint32_t * __restrict__ S_chains ):
// S_parts(S_parts), S_chains(S_chains), cnt(S_cnts[blockIdx.x]), current_bucket(blockIdx.x), i(0){}
__device__ __forceinline__ chain_iterator_i(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t current_partition):
S_parts(S_parts + threadIdx.x), S_chains(S_chains), cnt((S_cnts[current_partition]/blockDim.x) + max(((int32_t) (S_cnts[current_partition] % (blockDim.x))) - ((int32_t) (threadIdx.x)), 0)), ptr(S_parts + ((size_t) current_partition << log2_bucket_size) + threadIdx.x), current_bucket(current_partition), next_bucket(S_chains[current_partition]), i(0){}
// __device__ __forceinline__ chain_iterator_i(
// const uint32_t * __restrict__ S_cnts):
// cnt(0), i(((S_cnts[blockIdx.x] + 4 * blockDim.x - 1)/(4 * blockDim.x)) * 4 * blockDim.x){}
__device__ __forceinline__ chain_iterator_i(
const uint32_t * __restrict__ S_cnts,
uint32_t current_partition):
cnt(0), i(((S_cnts[current_partition] + blockDim.x - 1)/(blockDim.x))){}
__device__ __forceinline__ chain_iterator_i& operator++(){
++i;// * blockDim.x;
ptr += blockDim.x;
if ((i * blockDim.x) & bucket_size_mask) return *this;
current_bucket = next_bucket;//int_shared[0];
ptr = S_parts + (current_bucket << log2_bucket_size);
next_bucket = S_chains[next_bucket];
return *this;
}
__device__ __forceinline__ chain_iterator_i_ref operator*() const {
chain_iterator_i_ref tmp;
tmp.x = *ptr;
tmp.v = i < cnt;
return tmp;
}
__device__ __forceinline__ bool operator!=(const chain_iterator_i& o){
return i != o.i;
}
};
class chain_i{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t partition;
public:
__device__ __host__ __forceinline__ chain_i(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t partition):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains), partition(partition){}
__device__ __forceinline__ chain_iterator_i begin() const {
return chain_iterator_i(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ chain_iterator_i end() const {
return chain_iterator_i(S_cnts, partition);
}
};
class chain{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t partition;
public:
__device__ __host__ __forceinline__ chain(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t partition):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains), partition(partition){}
__device__ __forceinline__ chain_iterator begin() const {
return chain_iterator(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ chain_iterator end() const {
return chain_iterator(S_cnts, partition);
}
};
class chains{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
public:
__device__ __host__ __forceinline__ chains(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains){}
__device__ __host__ __forceinline__ chain get_chain(uint32_t partition) const{
return chain(S_parts, S_cnts, S_chains, partition);
}
__device__ __host__ __forceinline__ chain_i get_chain_i(uint32_t partition) const{
return chain_i(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ uint32_t get_chain_size(uint32_t partition) const{
return S_cnts[partition];
}
};
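/* A minimal usage sketch of the bucket-chain iterator API above (a hypothetical
   example, not part of the original code): one block walks the chain of a single
   partition and sums its keys, assuming the keys are non-negative. */
__global__ void example_sum_chain(const int32_t* S_parts, const uint32_t* S_cnts,
                                  const uint32_t* S_chains, uint32_t partition,
                                  unsigned long long* out) {
    chain c(S_parts, S_cnts, S_chains, partition);
    unsigned long long local = 0;
    for (chain_iterator it = c.begin(); it != c.end(); ++it) {
        chain_iterator_ref r = *it; /* r.x carries 4 keys, r.cnt says how many are still valid */
        for (int k = 0; k < 4; k++)
            if (k < r.cnt)
                local += (unsigned long long) r.x.i[k];
    }
    atomicAdd(out, local);
}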
/*essentially the same as join_partitioned_aggregate, but builds the hash table in shared memory*/
__global__ void join_partitioned_shared (
const int32_t* R,
const int32_t* Pr,
const uint32_t* R_cnts,
const uint32_t* R_chain,
const int32_t* S,
const int32_t* Ps,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
int32_t* results) {
__shared__ int16_t elem[4096 + 512];
__shared__ int32_t payload[4096 + 512];
__shared__ int16_t next[4096 + 512];
__shared__ int32_t head[LOCAL_BUCKETS];
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
int lid = tid % 32;
int gnum = blockDim.x/32;
int count = 0;
int pr = -1;
int ps = -1;
for (uint32_t p = block; p < parts; p += pwidth) {
int len_R = R_cnts[p];
int len_S = S_cnts[p];
if (len_S > 4096 + 512) {
/*this was written for a microbenchmark, so the case of an oversized S partition is not implemented*/
continue;
} else {
chain R_chains(R, R_cnts, R_chain, p);
chain Pr_chains(Pr, R_cnts, R_chain, p);
chain S_chains(S, S_cnts, S_chain, p);
chain Ps_chains(Ps, S_cnts, S_chain, p);
int off = 0;
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
chain_iterator it_S = S_chains.begin();
chain_iterator it_Ps = Ps_chains.begin();
for (;it_S != S_chains.end(); ++it_S, ++it_Ps) {
vec4 data_S = (*it_S).x;
vec4 data_Ps = (*it_Ps).x;
int l_cnt_S = (*it_S).cnt;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
off += (off < 4096)? blockDim.x : rem_s;
}
}
__syncthreads();
chain_iterator it_R = R_chains.begin();
chain_iterator it_Pr = Pr_chains.begin();
for (;it_R != R_chains.end(); ++it_R, ++it_Pr) {
vec4 data_R = (*it_R).x;
vec4 data_Pr = (*it_Pr).x;
int l_cnt_R = (*it_R).cnt;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_R) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
count += pval*payload[pos];
}
pos = next[pos];
}
}
}
}
__syncthreads();
}
}
atomicAdd(results, count);
__syncthreads();
}
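/* Hypothetical host-side launch sketch (an assumption, not part of the original):
   the 64 x 1024 grid mirrors the configuration this codebase uses elsewhere;
   results must point to a zero-initialized device counter. */
void launch_join_partitioned_shared(const int32_t* R, const int32_t* Pr,
                                    const uint32_t* R_cnts, const uint32_t* R_chain,
                                    const int32_t* S, const int32_t* Ps,
                                    const uint32_t* S_cnts, const uint32_t* S_chain,
                                    int32_t log_parts, int32_t* results) {
    hipLaunchKernelGGL(join_partitioned_shared, dim3(64), dim3(1024), 0, 0,
                       R, Pr, R_cnts, R_chain, S, Ps, S_cnts, S_chain,
                       log_parts, results);
}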
/*essentially the same as join_partitioned_aggregate, but builds the hash table in global GPU memory*/
__global__ void join_partitioned_global (
const int32_t* R,
const int32_t* Pr,
const uint32_t* R_cnts,
const uint32_t* R_chain,
const int32_t* S,
const int32_t* Ps,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
int32_t* results,
int32_t* buffer) {
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
buffer += block*8*4096;
int16_t* elem = (int16_t*) buffer;
int32_t* payload = buffer + 4096 + 512;
int16_t* next = (int16_t*) (buffer + 2*(4096 + 512));
int32_t* head = buffer + 3*(4096+512);
int lid = tid % 32;
int gnum = blockDim.x/32;
int count = 0;
int pr = -1;
int ps = -1;
for (uint32_t p = block; p < parts; p += pwidth) {
chain R_chains(R, R_cnts, R_chain, p);
chain Pr_chains(Pr, R_cnts, R_chain, p);
chain S_chains(S, S_cnts, S_chain, p);
chain Ps_chains(Ps, S_cnts, S_chain, p);
int len_R = R_cnts[p];
int len_S = S_cnts[p];
if (len_S > 4096 + 512) {
/*this was written for a microbenchmark, so the case of an oversized S partition is not implemented*/
continue;
} else {
int off = 0;
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
chain_iterator it_S = S_chains.begin();
chain_iterator it_Ps = Ps_chains.begin();
for (;it_S != S_chains.end(); ++it_S, ++it_Ps) {
vec4 data_S = (*it_S).x;
vec4 data_Ps = (*it_Ps).x;
int l_cnt_S = (*it_S).cnt;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
off += (off < 4096)? blockDim.x : rem_s;
}
}
__syncthreads();
chain_iterator it_R = R_chains.begin();
chain_iterator it_Pr = Pr_chains.begin();
for (;it_R != R_chains.end(); ++it_R, ++it_Pr) {
vec4 data_R = (*it_R).x;
vec4 data_Pr = (*it_Pr).x;
int l_cnt_R = (*it_R).cnt;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_R) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
count += pval*payload[pos];
}
pos = next[pos];
}
}
}
}
__syncthreads();
}
}
atomicAdd(results, count);
__syncthreads();
} | c01bc5b31e95723911c760681941713f80c29d3b.cu | /*Copyright (c) 2018 Data Intensive Applications and Systems Laboratory (DIAS)
Ecole Polytechnique Federale de Lausanne
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.*/
#include <cassert>
#include <iostream>
#include <numa.h>
#include <unistd.h>
#include "join-primitives.cuh"
__global__ void init_payload (int* R, int n) {
for (int i = threadIdx.x + blockIdx.x*blockDim.x; i < n; i += blockDim.x*gridDim.x)
R[i] = i;
}
/*
S= keys of the data to be partitioned
P= payloads of the data to be partitioned
heads= keeps information on the first bucket per partition and the number of elements in it, packed into one 64-bit integer (only used here)
chains= the successor of a bucket in the bucket list
out_cnts= number of elements per partition
buckets_used= how many buckets have already been reserved by the partitioning
offsets= describe the segments that occur due to partitioning
note: multithreaded CPU partitioning creates partitions that consist of contiguous segments
=> iterate over these segments to avoid handling empty slots
output_S= bucketized partitions of the data keys
output_P= bucketized partitions of the data payloads
cnt= number of elements to partition in total
log_parts= log of the number of partitions
first_bit= how much to shift the keys before "hashing"
num_threads= number of threads used on the CPU side, used together with offsets
preconditions:
heads: current bucket = (1 << 18) [special value for "no bucket"] and count = bucket_size - 1 (so the first write triggers bucket allocation)
out_cnts: 0
buckets_used= number of partitions (the first num_parts buckets are reserved)
*/
__global__ void partition_pass_one (
const int32_t * __restrict__ S,
const int32_t * __restrict__ P,
const size_t * __restrict__ offsets,
uint64_t * __restrict__ heads,
uint32_t * __restrict__ buckets_used,
uint32_t * __restrict__ chains,
uint32_t * __restrict__ out_cnts,
int32_t * __restrict__ output_S,
int32_t * __restrict__ output_P,
size_t cnt,
uint32_t log_parts,
uint32_t first_bit,
uint32_t num_threads) {
assert((((size_t) bucket_size) + ((size_t) blockDim.x) * gridDim.x) < (((size_t) 1) << 32));
const uint32_t parts = 1 << log_parts;
const int32_t parts_mask = parts - 1;
uint32_t * router = (uint32_t *) int_shared;
uint32_t segment = 0;
size_t segment_limit = offsets[1];
size_t segment_next = offsets[2];
size_t* shared_offsets = (size_t*) (int_shared + 1024*4 + 4*parts);
/*if no segmentation in input use one segment with all data, else copy the segment info*/
if (offsets != NULL) {
for (int i = threadIdx.x; i < 4*num_threads; i += blockDim.x) {
shared_offsets[i] = offsets[i];
}
} else {
for (int i = threadIdx.x; i < 4*num_threads; i += blockDim.x) {
if (i == 1)
shared_offsets[i] = cnt;
else
shared_offsets[i] = 0;
}
}
shared_offsets[4*num_threads] = cnt+4096;
shared_offsets[4*num_threads+1] = cnt+4096;
/*partition element counter starts at 0*/
for (size_t j = threadIdx.x ; j < parts ; j += blockDim.x )
router[1024*4 + parts + j] = 0;
if (threadIdx.x == 0)
router[0] = 0;
__syncthreads();
/*iterate over the segments*/
for (int u = 0; u < 2*num_threads; u++) {
size_t segment_start = shared_offsets[2*u];
size_t segment_limit = shared_offsets[2*u + 1];
size_t segment_end = segment_start + ((segment_limit - segment_start + 4096 - 1)/4096)*4096;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x) + segment_start; i < segment_end ; i += 4 * blockDim.x * gridDim.x) {
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(S + i));
uint32_t thread_keys[4];
/*compute local histogram for a chunk of 4*blockDim.x elements*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (i + k < segment_limit){
uint32_t partition = (hasht(thread_vals.i[k]) >> first_bit) & parts_mask;
atomicAdd(router + (1024 * 4 + parts + partition), 1);
thread_keys[k] = partition;
} else {
thread_keys[k] = 0;
}
}
__syncthreads();
for (size_t j = threadIdx.x; j < parts ; j += blockDim.x ) {
uint32_t cnt = router[1024 * 4 + parts + j];
if (cnt > 0){
atomicAdd(out_cnts + j, cnt);
uint32_t pcnt ;
uint32_t bucket ;
uint32_t next_buck;
bool repeat = true;
while (__any(repeat)){
if (repeat){
/*check if any of the output bucket is filling up*/
uint64_t old_heads = atomicAdd(heads + j, ((uint64_t) cnt) << 32);
atomicMin(heads + j, ((uint64_t) (2*bucket_size)) << 32);
pcnt = ((uint32_t) (old_heads >> 32));
bucket = (uint32_t) old_heads ;
/*now there are two cases:
2) old_heads.cnt > bucket_size ( => locked => retry),
which would read: if (pcnt >= bucket_size) continue;*/
if (pcnt < bucket_size){
/* 1) old_heads.cnt <= bucket_size*/
/*check if the bucket was filled*/
if (pcnt + cnt >= bucket_size){
if (bucket < (1 << 18)) {
next_buck = atomicAdd(buckets_used, 1);
chains[bucket] = next_buck;
} else {
next_buck = j;
}
uint64_t tmp = next_buck + (((uint64_t) (pcnt + cnt - bucket_size)) << 32);
atomicExch(heads + j, tmp);
} else {
next_buck = bucket;
}
repeat = false;
}
}
}
router[1024 * 4 + j] = atomicAdd(router, cnt);
router[1024 * 4 + parts + j] = 0;//cnt;//pcnt ;
router[1024 * 4 + 2 * parts + j] = (bucket << log2_bucket_size) + pcnt;
router[1024 * 4 + 3 * parts + j] = next_buck << log2_bucket_size ;
}
}
__syncthreads();
uint32_t total_cnt = router[0];
__syncthreads();
/*calculate write positions for block-wise shuffle => atomicAdd on start of partition*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (i + k < segment_limit)
thread_keys[k] = atomicAdd(router + (1024 * 4 + thread_keys[k]), 1);
}
/*write the keys in shared memory*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k)
if (i + k < segment_limit)
router[thread_keys[k]] = thread_vals.i[k];
__syncthreads();
int32_t thread_parts[4];
/*read shuffled keys and write them to output partitions "somewhat" coalesced*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (threadIdx.x + 1024 * k < total_cnt) {
int32_t val = router[threadIdx.x + 1024 * k];
uint32_t partition = (hasht(val) >> first_bit) & parts_mask;
uint32_t cnt = router[1024 * 4 + partition] - (threadIdx.x + 1024 * k);
uint32_t bucket = router[1024 * 4 + 2 * parts + partition];
if (((bucket + cnt) ^ bucket) & ~bucket_size_mask){
uint32_t next_buck = router[1024 * 4 + 3 * parts + partition];
cnt = ((bucket + cnt) & bucket_size_mask);
bucket = next_buck;
}
bucket += cnt;
output_S[bucket] = val;
thread_parts[k] = partition;
}
}
__syncthreads();
/*read payloads of original data*/
thread_vals = *(reinterpret_cast<const vec4 *>(P + i));
/*shuffle payloads in shared memory, in the same offsets that we used for their corresponding keys*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k)
if (i + k < segment_limit) {
router[thread_keys[k]] = thread_vals.i[k];
}
__syncthreads();
/*write payloads to partition buckets in "somewhat coalesced manner"*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (threadIdx.x + 1024 * k < total_cnt) {
int32_t val = router[threadIdx.x + 1024 * k];
int32_t partition = thread_parts[k];
uint32_t cnt = router[1024 * 4 + partition] - (threadIdx.x + 1024 * k);
uint32_t bucket = router[1024 * 4 + 2 * parts + partition];
if (((bucket + cnt) ^ bucket) & ~bucket_size_mask){
uint32_t next_buck = router[1024 * 4 + 3 * parts + partition];
cnt = ((bucket + cnt) & bucket_size_mask);
bucket = next_buck;
}
bucket += cnt;
output_P[bucket] = val;
}
}
if (threadIdx.x == 0) router[0] = 0;
}
}
}
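/* Hedged illustration (not part of the original code) of the 64-bit "heads"
   encoding manipulated above: the low 32 bits hold the current bucket id and the
   high 32 bits hold the element count inside that bucket, which is why a single
   atomicAdd of (cnt << 32) reserves cnt slots at once. The bucket id (1 << 18)
   is the "no bucket yet" sentinel mentioned in the preconditions. */
__device__ __host__ __forceinline__ uint64_t pack_head(uint32_t bucket, uint32_t count) {
    return (((uint64_t) count) << 32) | bucket;
}
__device__ __host__ __forceinline__ void unpack_head(uint64_t head, uint32_t& bucket, uint32_t& count) {
    count = (uint32_t) (head >> 32); /* element count in the current bucket */
    bucket = (uint32_t) head;        /* id of the current bucket */
}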
/*
compute information for the second partitioning pass
input:
chains= points to the successor in the bucket list for each bucket (hint: we append new buckets to the end)
out_cnts= count of elements per partition
output:
chains= the packed value of the element count in the bucket and the partition the bucket belongs to
*/
__global__ void compute_bucket_info (uint32_t* chains, uint32_t* out_cnts, uint32_t log_parts) {
uint32_t parts = 1 << log_parts;
for (int p = threadIdx.x + blockIdx.x*blockDim.x; p < parts; p += gridDim.x*blockDim.x) {
uint32_t cur = p;
int32_t cnt = out_cnts[p];
while (cnt > 0) {
uint32_t local_cnt = (cnt >= 4096)? 4096 : cnt;
uint32_t val = (p << 13) + local_cnt;
uint32_t next = chains[cur];
chains[cur] = val;
cur = next;
cnt -= 4096;
}
}
}
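/* Hedged helpers (not in the original code) that spell out the packing produced
   by compute_bucket_info: partition id in the high bits, element count in the
   low 13 bits (a bucket holds at most 4096 elements, so 13 bits suffice for the
   count). Note that decompose_chains further below uses the same idea with a
   wider 15-bit count field. */
__device__ __forceinline__ uint32_t pack_bucket_info(uint32_t partition, uint32_t count) {
    return (partition << 13) + count;
}
__device__ __forceinline__ uint32_t bucket_info_partition(uint32_t info) {
    return info >> 13;
}
__device__ __forceinline__ uint32_t bucket_info_count(uint32_t info) {
    return info & ((1u << 13) - 1);
}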
/*
S= keys of the data to be re-partitioned
P= payloads of the data to be re-partitioned
bucket_info= for each input bucket, the packed (partition, element count) value produced by compute_bucket_info
heads= keeps information on the first bucket per partition and the number of elements in it, packed into one 64-bit integer (only used here)
chains= the successor of a bucket in the bucket list
out_cnts= number of elements per partition
buckets_used= how many buckets have already been reserved by the partitioning
output_S= bucketized partitions of the data keys (results)
output_P= bucketized partitions of the data payloads (results)
S_log_parts= log of the number of partitions of the previous pass
log_parts= log of the number of partitions for this pass
first_bit= how much to shift the keys before "hashing"
bucket_num_ptr= number of input buckets
preconditions:
heads: current bucket = (1 << 18) [special value for "no bucket"] and count = bucket_size - 1 (so the first write triggers bucket allocation)
out_cnts: 0
buckets_used= number of partitions (the first num_parts buckets are reserved)
*/
__global__ void partition_pass_two (
const int32_t * __restrict__ S,
const int32_t * __restrict__ P,
const uint32_t * __restrict__ bucket_info,
uint32_t * __restrict__ buckets_used,
uint64_t * heads,
uint32_t * __restrict__ chains,
uint32_t * __restrict__ out_cnts,
int32_t * __restrict__ output_S,
int32_t * __restrict__ output_P,
uint32_t S_log_parts,
uint32_t log_parts,
uint32_t first_bit,
uint32_t * bucket_num_ptr) {
assert((((size_t) bucket_size) + ((size_t) blockDim.x) * gridDim.x) < (((size_t) 1) << 32));
const uint32_t S_parts = 1 << S_log_parts;
const uint32_t parts = 1 << log_parts;
const int32_t parts_mask = parts - 1;
uint32_t buckets_num = *bucket_num_ptr;
uint32_t * router = (uint32_t *) int_shared; //[1024*4 + parts];
for (size_t j = threadIdx.x ; j < parts ; j += blockDim.x )
router[1024*4 + parts + j] = 0;
if (threadIdx.x == 0)
router[0] = 0;
__syncthreads();
/*each CUDA block processes a bucket at a time*/
for (size_t i = blockIdx.x; i < buckets_num; i += gridDim.x) {
uint32_t info = bucket_info[i];
/*number of elements per bucket*/
uint32_t cnt = info & ((1 << 13) - 1);
/*id of original partition*/
uint32_t pid = info >> 13;
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(S + bucket_size * i + 4*threadIdx.x));
uint32_t thread_keys[4];
/*compute local histogram for the bucket*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (4*threadIdx.x + k < cnt){
uint32_t partition = (hasht(thread_vals.i[k]) >> first_bit) & parts_mask;
atomicAdd(router + (1024 * 4 + parts + partition), 1);
thread_keys[k] = partition;
} else {
thread_keys[k] = 0;
}
}
__syncthreads();
for (size_t j = threadIdx.x; j < parts ; j += blockDim.x ) {
uint32_t cnt = router[1024 * 4 + parts + j];
if (cnt > 0){
atomicAdd(out_cnts + (pid << log_parts) + j, cnt);
uint32_t pcnt ;
uint32_t bucket ;
uint32_t next_buck;
bool repeat = true;
while (__any(repeat)){
if (repeat){
uint64_t old_heads = atomicAdd(heads + (pid << log_parts) + j, ((uint64_t) cnt) << 32);
atomicMin(heads + (pid << log_parts) + j, ((uint64_t) (2*bucket_size)) << 32);
pcnt = ((uint32_t) (old_heads >> 32));
bucket = (uint32_t) old_heads ;
if (pcnt < bucket_size){
if (pcnt + cnt >= bucket_size){
if (bucket < (1 << 18)) {
next_buck = atomicAdd(buckets_used, 1);
chains[bucket] = next_buck;
} else {
next_buck = (pid << log_parts) + j;
}
uint64_t tmp = next_buck + (((uint64_t) (pcnt + cnt - bucket_size)) << 32);
atomicExch(heads + (pid << log_parts) + j, tmp);
} else {
next_buck = bucket;
}
repeat = false;
}
}
}
router[1024 * 4 + j] = atomicAdd(router, cnt);
router[1024 * 4 + parts + j] = 0;
router[1024 * 4 + 2 * parts + j] = (bucket << log2_bucket_size) + pcnt;
router[1024 * 4 + 3 * parts + j] = next_buck << log2_bucket_size ;
}
}
__syncthreads();
uint32_t total_cnt = router[0];
__syncthreads();
/*calculate write positions for block-wise shuffle => atomicAdd on start of partition*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (4*threadIdx.x + k < cnt)
thread_keys[k] = atomicAdd(router + (1024 * 4 + thread_keys[k]), 1);
}
/*write the keys in shared memory*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k)
if (4*threadIdx.x + k < cnt)
router[thread_keys[k]] = thread_vals.i[k];
__syncthreads();
int32_t thread_parts[4];
/*read shuffled keys and write them to output partitions "somewhat" coalesced*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (threadIdx.x + 1024 * k < total_cnt) {
int32_t val = router[threadIdx.x + 1024 * k];
uint32_t partition = (hasht(val) >> first_bit) & parts_mask;
uint32_t cnt = router[1024 * 4 + partition] - (threadIdx.x + 1024 * k);
uint32_t bucket = router[1024 * 4 + 2 * parts + partition];
if (((bucket + cnt) ^ bucket) & ~bucket_size_mask){
uint32_t next_buck = router[1024 * 4 + 3 * parts + partition];
cnt = ((bucket + cnt) & bucket_size_mask);
bucket = next_buck;
}
bucket += cnt;
output_S[bucket] = val;
thread_parts[k] = partition;
}
}
__syncthreads();
/*read payloads of original data*/
thread_vals = *(reinterpret_cast<const vec4 *>(P + i*bucket_size + 4*threadIdx.x));
/*shuffle payloads in shared memory, in the same offsets that we used for their corresponding keys*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k)
if (4*threadIdx.x + k < cnt) {
router[thread_keys[k]] = thread_vals.i[k];
}
__syncthreads();
/*write payloads to partition buckets in "somewhat coalesced manner"*/
#pragma unroll
for (int k = 0 ; k < 4 ; ++k){
if (threadIdx.x + 1024 * k < total_cnt) {
int32_t val = router[threadIdx.x + 1024 * k];
int32_t partition = thread_parts[k];
uint32_t cnt = router[1024 * 4 + partition] - (threadIdx.x + 1024 * k);
uint32_t bucket = router[1024 * 4 + 2 * parts + partition];
if (((bucket + cnt) ^ bucket) & ~bucket_size_mask){
uint32_t next_buck = router[1024 * 4 + 3 * parts + partition];
cnt = ((bucket + cnt) & bucket_size_mask);
bucket = next_buck;
}
bucket += cnt;
output_P[bucket] = val;
}
}
if (threadIdx.x == 0) router[0] = 0;
}
}
#define LOCAL_BUCKETS_BITS 10
#define LOCAL_BUCKETS ((1 << LOCAL_BUCKETS_BITS))
#define MAX_BIT 32
__device__ int ctzd (int x) {
if (x == 0)
return 32;
int n = 0;
if ((x & 0x0000FFFF) == 0) {
n += 16;
x >>= 16;
}
if ((x & 0x000000FF) == 0) {
n += 8;
x >>= 8;
}
if ((x & 0x0000000F) == 0) {
n += 4;
x >>= 4;
}
if ((x & 0x00000003) == 0) {
n += 2;
x >>= 2;
}
if ((x & 0x00000001) == 0) {
n += 1;
x >>= 1;
}
return n;
}
__global__ void init_metadata_double (
uint64_t * __restrict__ heads1,
uint32_t * __restrict__ buckets_used1,
uint32_t * __restrict__ chains1,
uint32_t * __restrict__ out_cnts1,
uint32_t parts1,
uint32_t buckets_num1,
uint64_t * __restrict__ heads2,
uint32_t * __restrict__ buckets_used2,
uint32_t * __restrict__ chains2,
uint32_t * __restrict__ out_cnts2,
uint32_t parts2,
uint32_t buckets_num2
) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < buckets_num1; i += blockDim.x*gridDim.x)
chains1[i] = 0;
for (int i = tid; i < parts1; i += blockDim.x*gridDim.x)
out_cnts1[i] = 0;
for (int i = tid; i < parts1; i += blockDim.x*gridDim.x)
heads1[i] = (1 << 18) + (((uint64_t) bucket_size_mask) << 32);
if (tid == 0) {
*buckets_used1 = parts1;
}
for (int i = tid; i < buckets_num2; i += blockDim.x*gridDim.x)
chains2[i] = 0;
for (int i = tid; i < parts2; i += blockDim.x*gridDim.x)
out_cnts2[i] = 0;
for (int i = tid; i < parts2; i += blockDim.x*gridDim.x)
heads2[i] = (1 << 18) + (((uint64_t) bucket_size_mask) << 32);
if (tid == 0) {
*buckets_used2 = parts2;
}
}
/*
Building phase for a non-partitioned hash join with perfect hashing (this property is reflected in the code: we don't follow chains); it is the best case for the non-partitioned approach
data=array of the keys
payload=array of payloads
n=number of tuples
lookup=lookup table/hashtable that we build => we store the payload at position lookup[key]
*/
__global__ void build_perfect_array (int32_t* data, int32_t* payload, int n, int32_t* lookup) {
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int32_t val = thread_vals.i[k];
int32_t payload = thread_payloads.i[k];
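/* store payload + 1 so that 0 can act as the "empty slot" marker checked by the probe kernel */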
lookup[val] = payload + 1;
}
}
}
/*Probing phase for non-partitioned hash join with perfect hashing
data=keys for probe side
payload=payloads for probe side
n=number of elements
lookup=hashtable
aggr=the memory location in which we aggregate with atomics at the end*/
__global__ void probe_perfect_array (int32_t* data, int32_t* payload, int n, int32_t* lookup, int* aggr) {
int count = 0;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int payload = thread_payloads.i[k];
int res = lookup[val];
if (res)
count += (payload * (res - 1));
}
}
atomicAdd(aggr, count);
}
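/* Hedged host-side sketch (an assumption, not part of the original sources) of how
   the perfect-hashing pair is driven: lookup must have max_key + 1 slots and be
   zero-initialized, as must the aggregate; the tuple counts are assumed to be
   multiples of 4 because of the vec4 loads, and grid/block sizes are illustrative. */
void run_perfect_join_example(int32_t* d_Rkey, int32_t* d_Rpay, int nR,
                              int32_t* d_Skey, int32_t* d_Spay, int nS,
                              int32_t* d_lookup, int* d_aggr) {
    build_perfect_array<<<64, 1024>>>(d_Skey, d_Spay, nS, d_lookup);
    probe_perfect_array<<<64, 1024>>>(d_Rkey, d_Rpay, nR, d_lookup, d_aggr);
}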
/*
Building phase for non-partitioned hash join with chaining
data=array of the keys
payload=array of payloads
n=number of tuples
log_parts=log size of hashtable/chains
output=the chains [the rest of the array stays in place]
head=the first element of each chain
*/
__global__ void build_ht_chains (int32_t* data, int n, uint32_t log_parts, int32_t* output, int* head) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int hval = val & parts_mask;
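/* positions are stored 1-based so that 0 can serve as the end-of-chain sentinel in chains_probing */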
int last = atomicExch(head + hval, i+k+1);
//int64_t wr = (((int64_t) last) << 32) + val;
output[i + k] = last;
}
}
}
/*
Probing phase for non-partitioned hash join with chaining
data=array of the keys
payload=array of payloads
n=number of tuples
log_parts=log size of hashtable/chains
ht=the chains that show the successor for each build element
head=the first element of each chain
ht_key=the keys of the hashtable as an array
ht_pay=the payloads of the hashtable as an array
aggr=the memory location in which we aggregate with atomics at the end
*/
__global__ void chains_probing (int32_t* data, int32_t* payload, int n, uint32_t log_parts, int32_t* ht, int32_t* ht_key, int32_t* ht_pay, int* head, int* aggr) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
int count = 0;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int payload = thread_payloads.i[k];
int hval = val & parts_mask;
int next = head[hval];
while (next != 0) {
int ht_val = ht_key[next-1];
if (ht_val == val)
count += (payload * ht_pay[next-1]);
next = ht[next-1];
}
}
}
atomicAdd(aggr, count);
}
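/* Hedged usage sketch (an assumption, not part of the original sources) for the
   chaining-based non-partitioned join above: d_next has one slot per build tuple,
   d_head has 1 << log_parts zero-initialized slots (0 = empty chain), the build
   key/payload arrays stay in place and double as ht_key/ht_pay, and the tuple
   counts are assumed to be multiples of 4 because of the vec4 loads. */
void run_chains_join_example(int32_t* d_Skey, int32_t* d_Spay, int nS,
                             int32_t* d_Rkey, int32_t* d_Rpay, int nR,
                             uint32_t log_parts, int32_t* d_next, int* d_head,
                             int* d_aggr) {
    build_ht_chains<<<64, 1024>>>(d_Skey, nS, log_parts, d_next, d_head);
    chains_probing<<<64, 1024>>>(d_Rkey, d_Rpay, nR, log_parts,
                                 d_next, d_Skey, d_Spay, d_head, d_aggr);
}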
/*functions for linear probing
FIXME: there is a bug, so this is not operational yet [it was not in the paper, so this is not urgent]
*/
__global__ void ht_hist (int* data, int n, int log_parts, int* hist) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int hval = val & parts_mask;
int off = atomicAdd(hist + hval, 1);
}
}
}
__global__ void ht_offsets (int log_parts, int* hist, int* offset, int* aggr) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < parts; i += blockDim.x * gridDim.x) {
int cur = hist[i];
int off = atomicAdd(aggr, cur);
hist[i] = off;
offset[i] = off;
}
}
__global__ void build_ht_linear (int* data, int* payload, size_t n, int log_parts, int* offset, int* ht, int* htp) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
int hval = val & parts_mask;
int off = atomicAdd(offset + hval, 1);
ht[off] = val;
htp[off] = thread_payloads.i[k];
}
}
}
__global__ void linear_probing (int* data, int* payload, int* ht, int* htp, int* offset_s, int* offset_e, size_t n, int log_parts, int* aggr) {
int parts = 1 << log_parts;
int parts_mask = parts-1;
int count = 0;
for (size_t i = 4 *(threadIdx.x + blockIdx.x * blockDim.x); i < n ; i += 4 * blockDim.x * gridDim.x){
vec4 thread_vals = *(reinterpret_cast<const vec4 *>(data + i));
vec4 thread_payloads = *(reinterpret_cast<const vec4 *>(payload + i));
#pragma unroll
for (int k = 0; k < 4; ++k) {
int val = thread_vals.i[k];
for (int j = 0; j < 32; j++) {
int probe = __shfl(val, j);
int pay = __shfl(thread_payloads.i[k], j);
int hval = probe & parts_mask;
int start = offset_s[hval];
int end = offset_e[hval];
for (int p = start + threadIdx.x % 32; p < end; p += 32) {
if (ht[p] == probe) {
count += pay*htp[p];
}
}
}
}
}
atomicAdd(aggr, count);
}
/*break "long" bucket chains into smaller chains
this helps load balancing because we can allocate work at sub-chain granularity
and effectively solves the skew problem
bucket_info= we store the packed (partition, element count) value for each bucket
chains= successor in the partition's bucket list
out_cnts= count of elements in this partition
log_parts= log of the number of partitions
threshold= the maximum number of elements per subchain*/
__global__ void decompose_chains (uint32_t* bucket_info, uint32_t* chains, uint32_t* out_cnts, uint32_t log_parts, int threshold) {
uint32_t parts = 1 << log_parts;
for (int p = threadIdx.x + blockIdx.x*blockDim.x; p < parts; p += gridDim.x*blockDim.x) {
uint32_t cur = p;
int32_t cnt = out_cnts[p];
uint32_t first_cnt = (cnt >= threshold)? threshold : cnt;
int32_t cutoff = 0;
while (cnt > 0) {
cutoff += bucket_size;
cnt -= bucket_size;
uint32_t next = chains[cur];
if (cutoff >= threshold && cnt > 0) {
uint32_t local_cnt = (cnt >= threshold)? threshold : cnt;
bucket_info[next] = (p << 15) + local_cnt;
chains[cur] = 0;
cutoff = 0;
} else if (next != 0) {
bucket_info[next] = 0;
}
cur = next;
}
bucket_info[p] = (p << 15) + first_cnt;
}
}
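/* Worked example (illustrative, assuming bucket_size = 4096 and threshold = 8192):
   a partition p holding 10000 tuples spans buckets b0 -> b1 -> b2. The walk above
   cuts the chain after b1 (cutoff reaches 8192 with 1808 tuples still left), so
   bucket_info[b0] = (p << 15) + 8192, bucket_info[b1] = 0 (covered by b0's
   sub-chain), bucket_info[b2] = (p << 15) + 1808, and chains[b1] = 0 terminates
   the first sub-chain. The join kernels below can then treat b0 and b2 as two
   independent work units of bounded size. */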
/*kernel for performing the join between the partitioned relations
R,Pr= bucketized keys and payloads for relation R (probe side)
S,Ps= bucketized keys and payloads for relation S (build side)
bucket_info= tells us which partition each bucket belongs to and its number of elements (a value of 0 means the bucket is covered by a preceding sub-chain)
S_cnts, S_chain= for build-side we don't pack the info since we operate under the assumption that it is usually one bucket per partition (we don't load balance)
buckets_num=number of buckets for R
results=the memory address where we aggregate
*/
__global__ void join_partitioned_aggregate (
const int32_t* R,
const int32_t* Pr,
const uint32_t* R_chain,
const uint32_t* bucket_info,
const int32_t* S,
const int32_t* Ps,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
uint32_t* buckets_num,
int32_t* results) {
/*in order to save space, we discard the partitioning bits; then we can try fitting the keys in int16_t [HACK]*/
__shared__ int16_t elem[4096 + 512];
__shared__ int32_t payload[4096 + 512];
__shared__ int16_t next[4096 + 512];
__shared__ int32_t head[LOCAL_BUCKETS];
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
int lid = tid % 32;
int gnum = blockDim.x/32;
int count = 0;
int buckets_cnt = *buckets_num;
for (uint32_t bucket_r = block; bucket_r < buckets_cnt; bucket_r += pwidth) {
int info = bucket_info[bucket_r];
if (info != 0) {
/*unpack information on the subchain*/
int p = info >> 15;
int len_R = info & ((1 << 15) - 1);
int len_S = S_cnts[p];
/*S partition doesn't fit in shared memory*/
if (len_S > 4096+512) {
int bucket_r_loop = bucket_r;
/*now we build one R-side bucket in shared memory at a time and then probe it with the S side
this is sensible because
1) we have guarantees on the size of R from the chain decomposition
2) this is a skewed scenario, so the size of S can be arbitrary*/
for (int offset_r = 0; offset_r < len_R; offset_r += bucket_size) {
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
__syncthreads();
/*build a hashtable from an R bucket*/
for (int base_r = 0; base_r < bucket_size; base_r += 4*blockDim.x) {
vec4 data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * bucket_r_loop + base_r + 4*threadIdx.x));
vec4 data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * bucket_r_loop + base_r + 4*threadIdx.x));
int l_cnt_R = len_R - offset_r - base_r - 4 * threadIdx.x;
int cnt = 0;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_R) {
int val = data_R.i[k];
elem[base_r + k*blockDim.x + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[base_r + k*blockDim.x + tid] = data_Pr.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], base_r + k*blockDim.x + tid);
next[base_r + k*blockDim.x + tid] = last;
}
}
}
bucket_r_loop = R_chain[bucket_r_loop];
__syncthreads();
int bucket_s_loop = p;
int base_s = 0;
/*probe hashtable from an S bucket*/
for (int offset_s = 0; offset_s < len_S; offset_s += 4*blockDim.x) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * bucket_s_loop + base_s + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * bucket_s_loop + base_s + 4*threadIdx.x));
int l_cnt_S = len_S - offset_s - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_S.i[k];
int32_t pval = data_Ps.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_S) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
count += pval*payload[pos];
}
pos = next[pos];
}
}
}
base_s += 4*blockDim.x;
if (base_s >= bucket_size) {
bucket_s_loop = S_chain[bucket_s_loop];
base_s = 0;
}
}
__syncthreads();
}
} else {
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
int off;
int it;
int base = 0;
it = p;
off = 0;
/*build hashtable for S-side*/
for (off = 0; off < len_S;) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * it + base + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * it + base +4*threadIdx.x));
int l_cnt_S = len_S - off - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
off += (off < bucket_size)? blockDim.x : rem_s;
base += blockDim.x; /* advance the read position inside the current physical bucket */
}
if (base >= bucket_size) {
it = S_chain[it];
base = 0;
}
}
__syncthreads();
it = bucket_r;
off = 0;
/*probe from R-side*/
for (; 0 < len_R; off += 4*blockDim.x, len_R -= 4*blockDim.x) {
vec4 data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * it + off + 4*threadIdx.x));
vec4 data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * it + off + 4*threadIdx.x));
int l_cnt_R = len_R - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
/*hack to fit more data in shared memory*/
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_R) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
count += pval*payload[pos];
}
pos = next[pos];
}
}
}
if (off >= bucket_size) {
it = R_chain[it];
off = 0;
}
}
__syncthreads();
}
}
}
atomicAdd(results, count);
__syncthreads();
}
/*maximum size of the output; we always write at write_offset MOD (FOLD+1)
we use it in order to simulate the cases where the output size explodes: we do the actual writes and then overwrite them*/
#define FOLD ((1 << 24) - 1)
/*the number of elements that can be stored in a warp-level buffer during the join materialization*/
#define SHUFFLE_SIZE 16
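/* Hedged helper (not in the original code) spelling out the warp-level offset
   computation used by the flush loops below: __ballot collects every lane's
   write intention and __popc over the mask of the other participating lanes
   gives each lane a unique slot after the current fill level of the buffer. */
__device__ __forceinline__ int warp_write_offset(int wr_intention, int lid, int shuffle_ptr) {
    int threadmask = (lid < 31)? ~((1 << (lid+1)) - 1) : 0; /* lanes above this one */
    int mask = __ballot(wr_intention);
    return shuffle_ptr + __popc(mask & threadmask);
}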
/*practically the same as join_partitioned_aggregate
I add extra comments for the materialization technique*/
__global__ void join_partitioned_results (
const int32_t* R,
const int32_t* Pr,
const uint32_t* R_chain,
const uint32_t* bucket_info,
const int32_t* S,
const int32_t* Ps,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
uint32_t* buckets_num,
int32_t* results,
int32_t* output) {
__shared__ int16_t elem[4096 + 512];
__shared__ int32_t payload[4096 + 512];
__shared__ int16_t next[4096 + 512];
__shared__ int32_t head[LOCAL_BUCKETS];
__shared__ int32_t shuffle[2*SHUFFLE_SIZE*32];
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
int lid = tid % 32;
int gid = tid / 32;
int gnum = blockDim.x/32;
int count = 0;
int ptr;
int threadmask = (lid < 31)? ~((1 << (lid+1)) - 1) : 0;
int shuffle_ptr = 0;
int32_t* warp_shuffle = shuffle + gid * 2 * SHUFFLE_SIZE;
int buckets_cnt = *buckets_num;
for (uint32_t bucket_r = block; bucket_r < buckets_cnt; bucket_r += pwidth) {
int info = bucket_info[bucket_r];
if (info != 0) {
int p = info >> 15;
int len_R = info & ((1 << 15) - 1);
int len_S = S_cnts[p];
if (len_S > 4096+512) {
int bucket_r_loop = bucket_r;
for (int offset_r = 0; offset_r < len_R; offset_r += bucket_size) {
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
__syncthreads();
for (int base_r = 0; base_r < bucket_size; base_r += 4*blockDim.x) {
vec4 data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * bucket_r_loop + base_r + 4*threadIdx.x));
vec4 data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * bucket_r_loop + base_r + 4*threadIdx.x));
int l_cnt_R = len_R - offset_r - base_r - 4 * threadIdx.x;
int cnt = 0;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_R) {
int val = data_R.i[k];
elem[base_r + k*blockDim.x + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[base_r + k*blockDim.x + tid] = data_Pr.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], base_r + k*blockDim.x + tid);
next[base_r + k*blockDim.x + tid] = last;
}
}
}
bucket_r_loop = R_chain[bucket_r_loop];
__syncthreads();
int bucket_s_loop = p;
int base_s = 0;
for (int offset_s = 0; offset_s < len_S; offset_s += 4*blockDim.x) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * bucket_s_loop + base_s + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * bucket_s_loop + base_s + 4*threadIdx.x));
int l_cnt_S = len_S - offset_s - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_S.i[k];
int32_t pval = data_Ps.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t pay;
int32_t pos = (k < l_cnt_S)? head[hval] : -1;
/*check at warp level whether someone is still following a chain => this way we can use warp shuffles without risk*/
int pred = (pos >= 0);
while (__any(pred)) {
int wr_intention = 0;
/*we have a match, fetch the data to be written*/
if (pred) {
if (elem[pos] == tval) {
pay = payload[pos];
wr_intention = 1;
count++;
}
pos = next[pos];
pred = (pos >= 0);
}
/*find out who had a match in this execution step*/
int mask = __ballot(wr_intention);
/*compute this lane's write offset into the software-managed warp buffer; the loop below flushes it whenever it overflows*/
int wr_offset = shuffle_ptr + __popc(mask & threadmask);
shuffle_ptr = shuffle_ptr + __popc(mask);
/*while the buffer overflows, flush it
we flush 16 payloads from one side and then the 16 matching payloads from the other side consecutively; of course, other output formats might be friendlier*/
while (shuffle_ptr >= SHUFFLE_SIZE) {
if (wr_intention && (wr_offset < SHUFFLE_SIZE)) {
warp_shuffle[wr_offset] = pay;
warp_shuffle[wr_offset+SHUFFLE_SIZE] = pval;
wr_intention = 0;
}
if (lid == 0) {
ptr = atomicAdd(results, 2*SHUFFLE_SIZE);
ptr = ptr & FOLD;
}
ptr = __shfl(ptr, 0);
output[ptr + lid] = warp_shuffle[lid];
wr_offset -= SHUFFLE_SIZE;
shuffle_ptr -= SHUFFLE_SIZE;
}
/*now they fit, write them into the buffer*/
if (wr_intention && (wr_offset >= 0)) {
warp_shuffle[wr_offset] = pay;
warp_shuffle[wr_offset+SHUFFLE_SIZE] = pval;
wr_intention = 0;
}
}
}
base_s += 4*blockDim.x;
if (base_s >= bucket_size) {
bucket_s_loop = S_chain[bucket_s_loop];
base_s = 0;
}
}
__syncthreads();
}
} else {
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
int off;
int it;
int base = 0;
it = p;
off = 0;
for (off = 0; off < len_S;) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * it + base + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * it + base +4*threadIdx.x));
int l_cnt_S = len_S - off - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
off += (off < bucket_size)? blockDim.x : rem_s;
base += blockDim.x;
}
if (base >= bucket_size) {
it = S_chain[it];
base = 0;
}
}
__syncthreads();
it = bucket_r;
off = 0;
for (; 0 < len_R; off += 4*blockDim.x, len_R -= 4*blockDim.x) {
int l_cnt_R = len_R - 4 * threadIdx.x;
vec4 data_R;
vec4 data_Pr;
data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * it + off + 4*threadIdx.x));
data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * it + off + 4*threadIdx.x));
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t pay;
int32_t pos = (k < l_cnt_R)? head[hval] : -1;
/*same as previous code block*/
int pred = (pos >= 0);
while (__any(pred)) {
int wr_intention = 0;
if (pred) {
if (elem[pos] == tval) {
pay = payload[pos];
wr_intention = 1;
count++;
}
pos = next[pos];
pred = (pos >= 0);
}
int mask = __ballot(wr_intention);
int wr_offset = shuffle_ptr + __popc(mask & threadmask);
shuffle_ptr = shuffle_ptr + __popc(mask);
while (shuffle_ptr >= SHUFFLE_SIZE) {
if (wr_intention && (wr_offset < SHUFFLE_SIZE)) {
warp_shuffle[wr_offset] = pval;
warp_shuffle[wr_offset+SHUFFLE_SIZE] = pay;
wr_intention = 0;
}
if (lid == 0) {
ptr = atomicAdd(results, 2*SHUFFLE_SIZE);
ptr = ptr & FOLD;
}
ptr = __shfl(ptr, 0);
output[ptr + lid] = warp_shuffle[lid];
wr_offset -= SHUFFLE_SIZE;
shuffle_ptr -= SHUFFLE_SIZE;
}
if (wr_intention && (wr_offset >= 0)) {
warp_shuffle[wr_offset] = pval;
warp_shuffle[wr_offset+SHUFFLE_SIZE] = pay;
wr_intention = 0;
}
}
}
if (off >= bucket_size) {
it = R_chain[it];
off = 0;
}
}
__syncthreads();
}
}
}
if (lid == 0) {
ptr = atomicAdd(results, 2*shuffle_ptr);
ptr = ptr & FOLD;
}
ptr = __shfl(ptr, 0);
if (lid < shuffle_ptr) {
output[ptr + lid] = warp_shuffle[lid];
output[ptr + lid + shuffle_ptr] = warp_shuffle[lid + SHUFFLE_SIZE];
}
__syncthreads();
}
/*again the same, but the payload is the virtual tuple id and we late-materialize from the Dx arrays, which store the actual columns that we need
also, here we have no overflows, because if we did, we wouldn't fit the data/extra columns :) */
__global__ void join_partitioned_varpayload (
const int32_t* R,
const int32_t* Pr,
const int32_t* Dr,
const uint32_t* R_chain,
const uint32_t* bucket_info,
const int32_t* S,
const int32_t* Ps,
const int32_t* Ds,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
int32_t col_num1,
int32_t col_num2,
int32_t rel_size,
uint32_t* buckets_num,
int32_t* results) {
__shared__ int16_t elem[4096 + 512];
__shared__ int32_t payload[4096 + 512];
__shared__ int16_t next[4096 + 512];
__shared__ int32_t head[LOCAL_BUCKETS];
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
int lid = tid % 32;
int gnum = blockDim.x/32;
int count = 0;
int buckets_cnt = *buckets_num;
for (uint32_t bucket_r = block; bucket_r < buckets_cnt; bucket_r += pwidth) {
int info = bucket_info[bucket_r];
if (info != 0) {
int p = info >> 15;
int len_R = info & ((1 << 15) - 1);
int len_S = S_cnts[p];
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
int off;
int it;
int base = 0;
it = p;
off = 0;
for (off = 0; off < len_S;) {
vec4 data_S = *(reinterpret_cast<const vec4 *>(S + bucket_size * it + base + 4*threadIdx.x));
vec4 data_Ps = *(reinterpret_cast<const vec4 *>(Ps + bucket_size * it + base +4*threadIdx.x));
int l_cnt_S = len_S - off - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
off += (off < bucket_size)? blockDim.x : rem_s;
base += blockDim.x; /* advance the read position inside the current physical bucket */
}
if (base >= bucket_size) {
it = S_chain[it];
base = 0;
}
}
__syncthreads();
it = bucket_r;
off = 0;
for (; 0 < len_R; off += 4*blockDim.x, len_R -= 4*blockDim.x) {
vec4 data_R = *(reinterpret_cast<const vec4 *>(R + bucket_size * it + off + 4*threadIdx.x));
vec4 data_Pr = *(reinterpret_cast<const vec4 *>(Pr + bucket_size * it + off + 4*threadIdx.x));
int l_cnt_R = len_R - 4 * threadIdx.x;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_R) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
int32_t bval = payload[pos];
for (int z = 0; z < col_num1; z++)
count += Dr[pval + z*rel_size];
for (int z = 0; z < col_num2; z++)
count += Ds[bval + z*rel_size];
}
pos = next[pos];
}
}
}
if (off >= bucket_size) {
it = R_chain[it];
off = 0;
}
}
__syncthreads();
}
}
atomicAdd(results, count);
__syncthreads();
}
/*late materialization and perfect hashing*/
__global__ void probe_perfect_array_varpay (int32_t* data, int32_t* Dr, int n, int32_t* lookup, int32_t* Ds, int col_num1, int col_num2, int rel_size, int* aggr) {
int count = 0;
for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < n ; i += blockDim.x * gridDim.x) {
int val = data[i];
int payload = i;
int res = lookup[val];
if (res > 0) {
res--;
for (int z = 0; z < col_num1; z++)
count += Dr[payload + z*rel_size];
for (int z = 0; z < col_num2; z++)
count += Ds[res + z*rel_size];
}
}
atomicAdd(aggr, count);
}
/*partition and compute metadata for a relation with key+payload*/
void prepare_Relation_payload (int* R, int* R_temp, int* P, int* P_temp, size_t RelsNum, uint32_t buckets_num, uint64_t* heads[2], uint32_t* cnts[2], uint32_t* chains[2], uint32_t* buckets_used[2], uint32_t log_parts1, uint32_t log_parts2, uint32_t first_bit, cudaStream_t streams, size_t* offsets_GPU, uint32_t num_threads) {
init_metadata_double<<<64, 1024, 0, streams>>> (
heads[0], buckets_used[0], chains[0], cnts[0], 1 << log_parts1, buckets_num,
heads[1], buckets_used[1], chains[1], cnts[1], 1 << (log_parts1 + log_parts2), buckets_num
);
partition_pass_one <<<64, 1024, (1024*4 + 4*(1 << log_parts1)) * sizeof(int32_t) + (4*num_threads+2)*sizeof(size_t), streams>>>(
R, P,
offsets_GPU,
heads[0],
buckets_used[0],
chains[0],
cnts[0],
R_temp, P_temp,
RelsNum,
log_parts1,
first_bit + log_parts2,
num_threads
);
compute_bucket_info <<<64, 1024, 0, streams>>> (chains[0], cnts[0], log_parts1);
partition_pass_two <<<64, 1024, (1024*4 + 4*(1 << log_parts2)) * sizeof(int32_t) + ((2 * (1 << log_parts2) + 1)* sizeof(int32_t)), streams>>>(
R_temp, P_temp,
chains[0],
buckets_used[1], heads[1], chains[1], cnts[1],
R, P,
log_parts1, log_parts2, first_bit,
buckets_used[0]);
}
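/* Hedged end-to-end sketch (an assumption, not part of the original sources) of
   how the pieces above combine into an aggregate join. It assumes offsets_R and
   offsets_S each describe a single segment {0, n, n, n} with num_threads = 1,
   that all metadata arrays are preallocated, that d_result is zero-initialized,
   and that the 8192-tuple decomposition threshold (two 4096-element buckets)
   and the 9+5 radix split are illustrative choices. */
void run_partitioned_join_example(
        int* R, int* R_temp, int* Pr, int* Pr_temp, size_t nR, size_t* offsets_R,
        int* S, int* S_temp, int* Ps, int* Ps_temp, size_t nS, size_t* offsets_S,
        uint32_t buckets_num,
        uint64_t* heads_R[2], uint32_t* cnts_R[2], uint32_t* chains_R[2], uint32_t* used_R[2],
        uint64_t* heads_S[2], uint32_t* cnts_S[2], uint32_t* chains_S[2], uint32_t* used_S[2],
        uint32_t* bucket_info, int32_t* d_result, cudaStream_t stream) {
    const uint32_t log_parts1 = 9, log_parts2 = 5, first_bit = 0;
    /* two-pass radix partitioning of both relations; the final data lands back in R/Pr and S/Ps */
    prepare_Relation_payload(R, R_temp, Pr, Pr_temp, nR, buckets_num,
                             heads_R, cnts_R, chains_R, used_R,
                             log_parts1, log_parts2, first_bit, stream, offsets_R, 1);
    prepare_Relation_payload(S, S_temp, Ps, Ps_temp, nS, buckets_num,
                             heads_S, cnts_S, chains_S, used_S,
                             log_parts1, log_parts2, first_bit, stream, offsets_S, 1);
    /* bound the work per R sub-chain so that blocks can be load balanced */
    decompose_chains<<<64, 1024, 0, stream>>>(bucket_info, chains_R[1], cnts_R[1],
                                              log_parts1 + log_parts2, 8192);
    join_partitioned_aggregate<<<64, 1024, 0, stream>>>(
        R, Pr, chains_R[1], bucket_info,
        S, Ps, cnts_S[1], chains_S[1],
        log_parts1 + log_parts2, used_R[1], d_result);
}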
/*partition and compute metadata for a relation with key+payload. We use different buffers for the final output (this makes sense for UVA-based techniques)*/
void prepare_Relation_payload_triple (int* R, int* R_temp, int* R_final, int* P, int* P_temp, int* P_final, size_t RelsNum, uint32_t buckets_num, uint64_t* heads[2], uint32_t* cnts[2], uint32_t* chains[2], uint32_t* buckets_used[2], uint32_t log_parts1, uint32_t log_parts2, uint32_t first_bit, cudaStream_t streams, size_t* offsets_GPU, uint32_t num_threads) {
init_metadata_double<<<64, 1024, 0, streams>>> (
heads[0], buckets_used[0], chains[0], cnts[0], 1 << log_parts1, buckets_num,
heads[1], buckets_used[1], chains[1], cnts[1], 1 << (log_parts1 + log_parts2), buckets_num
);
partition_pass_one <<<64, 1024, (1024*4 + 4*(1 << log_parts1)) * sizeof(int32_t) + (4*num_threads+2)*sizeof(size_t), streams>>>(
R, P,
offsets_GPU,
heads[0],
buckets_used[0],
chains[0],
cnts[0],
R_temp, P_temp,
RelsNum,
log_parts1,
first_bit + log_parts2,
num_threads
);
CHK_ERROR(cudaDeviceSynchronize());
compute_bucket_info <<<64, 1024, 0, streams>>> (chains[0], cnts[0], log_parts1);
partition_pass_two <<<64, 1024, (1024*4 + 4*(1 << log_parts2)) * sizeof(int32_t) + ((2 * (1 << log_parts2) + 1)* sizeof(int32_t)), streams>>>(
R_temp, P_temp,
chains[0],
buckets_used[1], heads[1], chains[1], cnts[1],
R_final, P_final,
log_parts1, log_parts2, first_bit,
buckets_used[0]);
}
template <typename Tv>
struct chain_iterator_ref_generic{
Tv x ;
int cnt;
};
template <typename T, typename Tv>
class chain_iterator_generic{
private:
const T * __restrict__ S_parts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t cnt ;
const T * __restrict__ ptr ;
uint32_t current_bucket ;
uint32_t next_bucket ;
uint32_t i ;
public:
__device__ __forceinline__ chain_iterator_generic(
const T * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t current_partition):
S_parts(S_parts + (16/sizeof(T)) * threadIdx.x), S_chains(S_chains),
cnt((S_cnts[current_partition]/((16/sizeof(T)) * blockDim.x))*(16/sizeof(T)) + max(((int32_t) (S_cnts[current_partition] % ((16/sizeof(T)) * blockDim.x))) - ((int32_t) ((16/sizeof(T)) * threadIdx.x)), 0)),
ptr(S_parts + ((size_t) current_partition << log2_bucket_size) + (16/sizeof(T)) * threadIdx.x),
current_bucket(current_partition),
next_bucket(S_chains[current_partition]),
i(0){}
__device__ __forceinline__ chain_iterator_generic(
const uint32_t * __restrict__ S_cnts,
uint32_t current_partition):
cnt(0),
i(((S_cnts[current_partition] + (16/sizeof(T)) * blockDim.x - 1)/((16/sizeof(T)) * blockDim.x))*(16/sizeof(T))){}
__device__ __forceinline__ chain_iterator_generic<T, Tv>& operator++(){
i += (16/sizeof(T));// * blockDim.x;
ptr += (16/sizeof(T)) * blockDim.x;
if ((i * blockDim.x) & bucket_size_mask) return *this;
current_bucket = next_bucket;//int_shared[0];
ptr = S_parts + (current_bucket << log2_bucket_size);
next_bucket = S_chains[next_bucket];
return *this;
}
__device__ __forceinline__ chain_iterator_ref_generic<Tv> operator*() const {
chain_iterator_ref_generic<Tv> tmp;
tmp.x = *reinterpret_cast<const Tv *>(ptr);
tmp.cnt = cnt - i;
return tmp;
}
__device__ __forceinline__ bool operator!=(const chain_iterator_generic<T, Tv>& o){
return i != o.i;
}
};
template <typename T, typename Tv>
class chain_generic{
private:
const T * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t partition;
public:
__device__ __host__ __forceinline__ chain_generic(
const T * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t partition):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains), partition(partition){}
__device__ __forceinline__ chain_iterator_generic<T, Tv> begin() const {
return chain_iterator_generic<T, Tv>(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ chain_iterator_generic<T, Tv> end() const {
return chain_iterator_generic<T, Tv>(S_cnts, partition);
}
};
template <typename T, typename Tv>
class chains_generic {
private:
const T * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
public:
__device__ __host__ __forceinline__ chains_generic(
const T * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains){}
__device__ __host__ __forceinline__ chain_generic<T, Tv> get_chain(uint32_t partition) const{
return chain_generic<T, Tv>(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ uint32_t get_chain_size(uint32_t partition) const{
return S_cnts[partition];
}
};
struct chain_iterator_ref{
vec4 x ;
int cnt;
};
struct chain_iterator_i_ref{
int32_t x;
bool v;
};
class chain_iterator{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t cnt ;
const int32_t * __restrict__ ptr ;
uint32_t current_bucket ;
uint32_t next_bucket ;
uint32_t i ;
public:
// __device__ __forceinline__ chain_iterator(
// const int32_t * __restrict__ S_parts ,
// const uint32_t * __restrict__ S_cnts ,
// const uint32_t * __restrict__ S_chains ):
// S_parts(S_parts), S_chains(S_chains), cnt(S_cnts[blockIdx.x]), current_bucket(blockIdx.x), i(0){}
__device__ __forceinline__ chain_iterator(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t current_partition):
        S_parts(S_parts + 4 * threadIdx.x), S_chains(S_chains),
        cnt((S_cnts[current_partition]/(4 * blockDim.x))*4 + max(((int32_t) (S_cnts[current_partition] % (4 * blockDim.x))) - ((int32_t) (4 * threadIdx.x)), 0)),
        ptr(S_parts + ((size_t) current_partition << log2_bucket_size) + 4 * threadIdx.x),
        current_bucket(current_partition),
        next_bucket(S_chains[current_partition]),
        i(0){}
// __device__ __forceinline__ chain_iterator(
// const uint32_t * __restrict__ S_cnts):
// cnt(0), i(((S_cnts[blockIdx.x] + 4 * blockDim.x - 1)/(4 * blockDim.x)) * 4 * blockDim.x){}
__device__ __forceinline__ chain_iterator(
const uint32_t * __restrict__ S_cnts,
uint32_t current_partition):
cnt(0), i(((S_cnts[current_partition] + 4 * blockDim.x - 1)/(4 * blockDim.x))*4){}
__device__ __forceinline__ chain_iterator& operator++(){
i += 4;// * blockDim.x;
ptr += 4 * blockDim.x;
if ((i * blockDim.x) & bucket_size_mask) return *this;
current_bucket = next_bucket;//int_shared[0];
ptr = S_parts + (current_bucket << log2_bucket_size);
next_bucket = S_chains[next_bucket];
return *this;
}
__device__ __forceinline__ chain_iterator_ref operator*() const {
chain_iterator_ref tmp;
tmp.x = *reinterpret_cast<const vec4 *>(ptr);
tmp.cnt = cnt - i;
return tmp;
}
__device__ __forceinline__ bool operator!=(const chain_iterator& o){
return i != o.i;
}
};
class chain_iterator_i{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t cnt ;
const int32_t * __restrict__ ptr ;
uint32_t current_bucket ;
uint32_t next_bucket ;
uint32_t i ;
public:
// __device__ __forceinline__ chain_iterator_i(
// const int32_t * __restrict__ S_parts ,
// const uint32_t * __restrict__ S_cnts ,
// const uint32_t * __restrict__ S_chains ):
// S_parts(S_parts), S_chains(S_chains), cnt(S_cnts[blockIdx.x]), current_bucket(blockIdx.x), i(0){}
__device__ __forceinline__ chain_iterator_i(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t current_partition):
        S_parts(S_parts + threadIdx.x), S_chains(S_chains),
        cnt((S_cnts[current_partition]/blockDim.x) + max(((int32_t) (S_cnts[current_partition] % (blockDim.x))) - ((int32_t) (threadIdx.x)), 0)),
        ptr(S_parts + ((size_t) current_partition << log2_bucket_size) + threadIdx.x),
        current_bucket(current_partition),
        next_bucket(S_chains[current_partition]),
        i(0){}
// __device__ __forceinline__ chain_iterator_i(
// const uint32_t * __restrict__ S_cnts):
// cnt(0), i(((S_cnts[blockIdx.x] + 4 * blockDim.x - 1)/(4 * blockDim.x)) * 4 * blockDim.x){}
__device__ __forceinline__ chain_iterator_i(
const uint32_t * __restrict__ S_cnts,
uint32_t current_partition):
cnt(0), i(((S_cnts[current_partition] + blockDim.x - 1)/(blockDim.x))){}
__device__ __forceinline__ chain_iterator_i& operator++(){
++i;// * blockDim.x;
ptr += blockDim.x;
if ((i * blockDim.x) & bucket_size_mask) return *this;
current_bucket = next_bucket;//int_shared[0];
ptr = S_parts + (current_bucket << log2_bucket_size);
next_bucket = S_chains[next_bucket];
return *this;
}
__device__ __forceinline__ chain_iterator_i_ref operator*() const {
chain_iterator_i_ref tmp;
tmp.x = *ptr;
tmp.v = i < cnt;
return tmp;
}
__device__ __forceinline__ bool operator!=(const chain_iterator_i& o){
return i != o.i;
}
};
class chain_i{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t partition;
public:
__device__ __host__ __forceinline__ chain_i(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t partition):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains), partition(partition){}
__device__ __forceinline__ chain_iterator_i begin() const {
return chain_iterator_i(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ chain_iterator_i end() const {
return chain_iterator_i(S_cnts, partition);
}
};
class chain{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
const uint32_t partition;
public:
__device__ __host__ __forceinline__ chain(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ,
uint32_t partition):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains), partition(partition){}
__device__ __forceinline__ chain_iterator begin() const {
return chain_iterator(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ chain_iterator end() const {
return chain_iterator(S_cnts, partition);
}
};
class chains{
private:
const int32_t * __restrict__ S_parts ;
const uint32_t * __restrict__ S_cnts ;
const uint32_t * __restrict__ S_chains ;
public:
__device__ __host__ __forceinline__ chains(
const int32_t * __restrict__ S_parts ,
const uint32_t * __restrict__ S_cnts ,
const uint32_t * __restrict__ S_chains ):
S_parts(S_parts), S_cnts(S_cnts), S_chains(S_chains){}
__device__ __host__ __forceinline__ chain get_chain(uint32_t partition) const{
return chain(S_parts, S_cnts, S_chains, partition);
}
__device__ __host__ __forceinline__ chain_i get_chain_i(uint32_t partition) const{
return chain_i(S_parts, S_cnts, S_chains, partition);
}
__device__ __forceinline__ uint32_t get_chain_size(uint32_t partition) const{
return S_cnts[partition];
}
};
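/* Illustrative traversal sketch (not part of the original benchmark): how a
   kernel typically walks one partition with the classes above. d_parts,
   d_cnts, d_chain, p and consume() are hypothetical names standing in for
   buffers and values produced by the partitioning pass.
    chains all_chains(d_parts, d_cnts, d_chain);
    chain  c = all_chains.get_chain(p);
    for (chain_iterator it = c.begin(); it != c.end(); ++it) {
        vec4 v     = (*it).x;    // the 4 keys this thread loads per step
        int  valid = (*it).cnt;  // how many of those 4 lanes are in range
        for (int k = 0; k < 4; k++)
            if (k < valid) consume(v.i[k]);
    }
*/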
/*essentially the join_partitioned_aggregate*/
__global__ void join_partitioned_shared (
const int32_t* R,
const int32_t* Pr,
const uint32_t* R_cnts,
const uint32_t* R_chain,
const int32_t* S,
const int32_t* Ps,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
int32_t* results) {
__shared__ int16_t elem[4096 + 512];
__shared__ int32_t payload[4096 + 512];
__shared__ int16_t next[4096 + 512];
__shared__ int32_t head[LOCAL_BUCKETS];
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
int lid = tid % 32;
int gnum = blockDim.x/32;
int count = 0;
int pr = -1;
int ps = -1;
for (uint32_t p = block; p < parts; p += pwidth) {
int len_R = R_cnts[p];
int len_S = S_cnts[p];
if (len_S > 4096 + 512) {
/*it was a microbenchmark so I didn't code this part*/
continue;
} else {
chain R_chains(R, R_cnts, R_chain, p);
chain Pr_chains(Pr, R_cnts, R_chain, p);
chain S_chains(S, S_cnts, S_chain, p);
chain Ps_chains(Ps, S_cnts, S_chain, p);
int off = 0;
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
chain_iterator it_S = S_chains.begin();
chain_iterator it_Ps = Ps_chains.begin();
for (;it_S != S_chains.end(); ++it_S, ++it_Ps) {
vec4 data_S = (*it_S).x;
vec4 data_Ps = (*it_Ps).x;
int l_cnt_S = (*it_S).cnt;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
off += (off < 4096)? blockDim.x : rem_s;
}
}
__syncthreads();
chain_iterator it_R = R_chains.begin();
chain_iterator it_Pr = Pr_chains.begin();
for (;it_R != R_chains.end(); ++it_R, ++it_Pr) {
vec4 data_R = (*it_R).x;
vec4 data_Pr = (*it_Pr).x;
int l_cnt_R = (*it_R).cnt;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_R) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
count += pval*payload[pos];
}
pos = next[pos];
}
}
}
}
__syncthreads();
}
}
atomicAdd(results, count);
__syncthreads();
}
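/* A minimal launch sketch (an assumption, not from the original source): the
   d_* pointers are placeholder device buffers produced by the partitioning
   pass, and d_results is a single zero-initialized int32_t accumulator.
    join_partitioned_shared<<<64, 512>>>(d_R, d_Pr, d_R_cnts, d_R_chain,
                                         d_S, d_Ps, d_S_cnts, d_S_chain,
                                         log_parts, d_results);
*/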
/*essentially the join_partitioned_aggregate but builds hashtable in GPU memory*/
__global__ void join_partitioned_global (
const int32_t* R,
const int32_t* Pr,
const uint32_t* R_cnts,
const uint32_t* R_chain,
const int32_t* S,
const int32_t* Ps,
const uint32_t* S_cnts,
const uint32_t* S_chain,
int32_t log_parts,
int32_t* results,
int32_t* buffer) {
int tid = threadIdx.x;
int block = blockIdx.x;
int width = blockDim.x;
int pwidth = gridDim.x;
int parts = 1 << log_parts;
buffer += block*8*4096;
int16_t* elem = (int16_t*) buffer;
    int32_t* payload = buffer + 4096 + 512;
int16_t* next = (int16_t*) (buffer + 2*(4096 + 512));
int32_t* head = buffer + 3*(4096+512);
int lid = tid % 32;
int gnum = blockDim.x/32;
int count = 0;
int pr = -1;
int ps = -1;
for (uint32_t p = block; p < parts; p += pwidth) {
chain R_chains(R, R_cnts, R_chain, p);
chain Pr_chains(Pr, R_cnts, R_chain, p);
chain S_chains(S, S_cnts, S_chain, p);
chain Ps_chains(Ps, S_cnts, S_chain, p);
int len_R = R_cnts[p];
int len_S = S_cnts[p];
if (len_S > 4096 + 512) {
/*it was a microbenchmark so I didn't code this part*/
continue;
} else {
int off = 0;
for (int i = tid; i < LOCAL_BUCKETS; i += blockDim.x)
head[i] = -1;
int rem_s = len_S % 4096;
rem_s = (rem_s + 4 - 1)/4;
__syncthreads();
chain_iterator it_S = S_chains.begin();
chain_iterator it_Ps = Ps_chains.begin();
for (;it_S != S_chains.end(); ++it_S, ++it_Ps) {
vec4 data_S = (*it_S).x;
vec4 data_Ps = (*it_Ps).x;
int l_cnt_S = (*it_S).cnt;
#pragma unroll
for (int k = 0; k < 4; k++) {
if (k < l_cnt_S) {
int val = data_S.i[k];
elem[off + tid] = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
payload[off + tid] = data_Ps.i[k];
int hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
int32_t last = atomicExch(&head[hval], off + tid);
next[off + tid] = last;
}
off += (off < 4096)? blockDim.x : rem_s;
}
}
__syncthreads();
chain_iterator it_R = R_chains.begin();
chain_iterator it_Pr = Pr_chains.begin();
for (;it_R != R_chains.end(); ++it_R, ++it_Pr) {
vec4 data_R = (*it_R).x;
vec4 data_Pr = (*it_Pr).x;
int l_cnt_R = (*it_R).cnt;
#pragma unroll
for (int k = 0; k < 4; k++) {
int32_t val = data_R.i[k];
int32_t pval = data_Pr.i[k];
int16_t tval = (int16_t) (val >> (LOCAL_BUCKETS_BITS + log_parts));
int32_t hval = (val >> log_parts) & (LOCAL_BUCKETS - 1);
if (k < l_cnt_R) {
int32_t pos = head[hval];
while (pos >= 0) {
if (elem[pos] == tval) {
count += pval*payload[pos];
}
pos = next[pos];
}
}
}
}
__syncthreads();
}
}
atomicAdd(results, count);
__syncthreads();
} |
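/* Scratch sizing sketch for join_partitioned_global (hedged; the names below
   are placeholders): each block claims a private slice of 8*4096 int32_t
   words via `buffer += block*8*4096` above, so the scratch buffer must cover
   at least gridDim.x such slices.
    int32_t* d_buffer;
    cudaMalloc(&d_buffer, (size_t) grid_size * 8 * 4096 * sizeof(int32_t));
*/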
1117247ea6325d30bd3e9dc7eb7b15a7899e6ce1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/detail/tdigest/tdigest.hpp>
#include <cudf/tdigest/tdigest_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/tdigest_utilities.cuh>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/tuple.h>
// for use with groupby and reduction aggregation tests.
namespace cudf {
namespace test {
void tdigest_sample_compare(cudf::tdigest::tdigest_column_view const& tdv,
std::vector<expected_value> const& h_expected)
{
column_view result_mean = tdv.means();
column_view result_weight = tdv.weights();
auto expected_mean = cudf::make_fixed_width_column(
data_type{type_id::FLOAT64}, h_expected.size(), mask_state::UNALLOCATED);
auto expected_weight = cudf::make_fixed_width_column(
data_type{type_id::FLOAT64}, h_expected.size(), mask_state::UNALLOCATED);
auto sampled_result_mean = cudf::make_fixed_width_column(
data_type{type_id::FLOAT64}, h_expected.size(), mask_state::UNALLOCATED);
auto sampled_result_weight = cudf::make_fixed_width_column(
data_type{type_id::FLOAT64}, h_expected.size(), mask_state::UNALLOCATED);
auto h_expected_src = std::vector<size_type>(h_expected.size());
auto h_expected_mean = std::vector<double>(h_expected.size());
auto h_expected_weight = std::vector<double>(h_expected.size());
{
auto iter = thrust::make_counting_iterator(0);
std::for_each_n(iter, h_expected.size(), [&](size_type const index) {
h_expected_src[index] = thrust::get<0>(h_expected[index]);
h_expected_mean[index] = thrust::get<1>(h_expected[index]);
h_expected_weight[index] = thrust::get<2>(h_expected[index]);
});
}
auto d_expected_src = cudf::detail::make_device_uvector_async(
h_expected_src, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_expected_mean = cudf::detail::make_device_uvector_async(
h_expected_mean, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_expected_weight = cudf::detail::make_device_uvector_async(
h_expected_weight, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto iter = thrust::make_counting_iterator(0);
thrust::for_each(
rmm::exec_policy(cudf::get_default_stream()),
iter,
iter + h_expected.size(),
[expected_src_in = d_expected_src.data(),
expected_mean_in = d_expected_mean.data(),
expected_weight_in = d_expected_weight.data(),
expected_mean = expected_mean->mutable_view().begin<double>(),
expected_weight = expected_weight->mutable_view().begin<double>(),
result_mean = result_mean.begin<double>(),
result_weight = result_weight.begin<double>(),
sampled_result_mean = sampled_result_mean->mutable_view().begin<double>(),
sampled_result_weight =
sampled_result_weight->mutable_view().begin<double>()] __device__(size_type index) {
expected_mean[index] = expected_mean_in[index];
expected_weight[index] = expected_weight_in[index];
auto const src_index = expected_src_in[index];
sampled_result_mean[index] = result_mean[src_index];
sampled_result_weight[index] = result_weight[src_index];
});
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*expected_mean, *sampled_result_mean);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_weight, *sampled_result_weight);
}
std::unique_ptr<column> make_expected_tdigest_column(std::vector<expected_tdigest> const& groups)
{
std::vector<std::unique_ptr<column>> tdigests;
// make an individual digest
auto make_digest = [&](expected_tdigest const& tdigest) {
std::vector<std::unique_ptr<column>> inner_children;
inner_children.push_back(std::make_unique<cudf::column>(tdigest.mean));
inner_children.push_back(std::make_unique<cudf::column>(tdigest.weight));
// tdigest struct
auto tdigests =
cudf::make_structs_column(tdigest.mean.size(), std::move(inner_children), 0, {});
std::vector<size_type> h_offsets{0, tdigest.mean.size()};
auto offsets =
cudf::make_fixed_width_column(data_type{type_id::INT32}, 2, mask_state::UNALLOCATED);
CUDF_CUDA_TRY(hipMemcpy(offsets->mutable_view().begin<size_type>(),
h_offsets.data(),
sizeof(size_type) * 2,
hipMemcpyDefault));
auto list = cudf::make_lists_column(1, std::move(offsets), std::move(tdigests), 0, {});
auto min_col =
cudf::make_fixed_width_column(data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED);
thrust::fill(rmm::exec_policy(cudf::get_default_stream()),
min_col->mutable_view().begin<double>(),
min_col->mutable_view().end<double>(),
tdigest.min);
auto max_col =
cudf::make_fixed_width_column(data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED);
thrust::fill(rmm::exec_policy(cudf::get_default_stream()),
max_col->mutable_view().begin<double>(),
max_col->mutable_view().end<double>(),
tdigest.max);
std::vector<std::unique_ptr<column>> children;
children.push_back(std::move(list));
children.push_back(std::move(min_col));
children.push_back(std::move(max_col));
return make_structs_column(1, std::move(children), 0, {});
};
// build the individual digests
std::transform(groups.begin(), groups.end(), std::back_inserter(tdigests), make_digest);
// concatenate them
std::vector<column_view> views;
std::transform(tdigests.begin(),
tdigests.end(),
std::back_inserter(views),
[](std::unique_ptr<column> const& c) { return c->view(); });
return cudf::concatenate(views);
}
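// Illustrative call (hypothetical values, a sketch rather than an actual test
// case): each expected_tdigest supplies the centroid means, the matching
// weights, and the min/max of the input, in that order, mirroring the member
// accesses in make_digest above.
//
//   cudf::test::fixed_width_column_wrapper<double> means{1.0, 2.0, 3.0};
//   cudf::test::fixed_width_column_wrapper<double> weights{1.0, 1.0, 1.0};
//   auto expected =
//     make_expected_tdigest_column({{means, weights, /*min*/ 1.0, /*max*/ 3.0}});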
} // namespace test
} // namespace cudf
| 1117247ea6325d30bd3e9dc7eb7b15a7899e6ce1.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/detail/tdigest/tdigest.hpp>
#include <cudf/tdigest/tdigest_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/tdigest_utilities.cuh>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/tuple.h>
// for use with groupby and reduction aggregation tests.
namespace cudf {
namespace test {
void tdigest_sample_compare(cudf::tdigest::tdigest_column_view const& tdv,
std::vector<expected_value> const& h_expected)
{
column_view result_mean = tdv.means();
column_view result_weight = tdv.weights();
auto expected_mean = cudf::make_fixed_width_column(
data_type{type_id::FLOAT64}, h_expected.size(), mask_state::UNALLOCATED);
auto expected_weight = cudf::make_fixed_width_column(
data_type{type_id::FLOAT64}, h_expected.size(), mask_state::UNALLOCATED);
auto sampled_result_mean = cudf::make_fixed_width_column(
data_type{type_id::FLOAT64}, h_expected.size(), mask_state::UNALLOCATED);
auto sampled_result_weight = cudf::make_fixed_width_column(
data_type{type_id::FLOAT64}, h_expected.size(), mask_state::UNALLOCATED);
auto h_expected_src = std::vector<size_type>(h_expected.size());
auto h_expected_mean = std::vector<double>(h_expected.size());
auto h_expected_weight = std::vector<double>(h_expected.size());
{
auto iter = thrust::make_counting_iterator(0);
std::for_each_n(iter, h_expected.size(), [&](size_type const index) {
h_expected_src[index] = thrust::get<0>(h_expected[index]);
h_expected_mean[index] = thrust::get<1>(h_expected[index]);
h_expected_weight[index] = thrust::get<2>(h_expected[index]);
});
}
auto d_expected_src = cudf::detail::make_device_uvector_async(
h_expected_src, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_expected_mean = cudf::detail::make_device_uvector_async(
h_expected_mean, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_expected_weight = cudf::detail::make_device_uvector_async(
h_expected_weight, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto iter = thrust::make_counting_iterator(0);
thrust::for_each(
rmm::exec_policy(cudf::get_default_stream()),
iter,
iter + h_expected.size(),
[expected_src_in = d_expected_src.data(),
expected_mean_in = d_expected_mean.data(),
expected_weight_in = d_expected_weight.data(),
expected_mean = expected_mean->mutable_view().begin<double>(),
expected_weight = expected_weight->mutable_view().begin<double>(),
result_mean = result_mean.begin<double>(),
result_weight = result_weight.begin<double>(),
sampled_result_mean = sampled_result_mean->mutable_view().begin<double>(),
sampled_result_weight =
sampled_result_weight->mutable_view().begin<double>()] __device__(size_type index) {
expected_mean[index] = expected_mean_in[index];
expected_weight[index] = expected_weight_in[index];
auto const src_index = expected_src_in[index];
sampled_result_mean[index] = result_mean[src_index];
sampled_result_weight[index] = result_weight[src_index];
});
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*expected_mean, *sampled_result_mean);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_weight, *sampled_result_weight);
}
std::unique_ptr<column> make_expected_tdigest_column(std::vector<expected_tdigest> const& groups)
{
std::vector<std::unique_ptr<column>> tdigests;
// make an individual digest
auto make_digest = [&](expected_tdigest const& tdigest) {
std::vector<std::unique_ptr<column>> inner_children;
inner_children.push_back(std::make_unique<cudf::column>(tdigest.mean));
inner_children.push_back(std::make_unique<cudf::column>(tdigest.weight));
// tdigest struct
auto tdigests =
cudf::make_structs_column(tdigest.mean.size(), std::move(inner_children), 0, {});
std::vector<size_type> h_offsets{0, tdigest.mean.size()};
auto offsets =
cudf::make_fixed_width_column(data_type{type_id::INT32}, 2, mask_state::UNALLOCATED);
CUDF_CUDA_TRY(cudaMemcpy(offsets->mutable_view().begin<size_type>(),
h_offsets.data(),
sizeof(size_type) * 2,
cudaMemcpyDefault));
auto list = cudf::make_lists_column(1, std::move(offsets), std::move(tdigests), 0, {});
auto min_col =
cudf::make_fixed_width_column(data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED);
thrust::fill(rmm::exec_policy(cudf::get_default_stream()),
min_col->mutable_view().begin<double>(),
min_col->mutable_view().end<double>(),
tdigest.min);
auto max_col =
cudf::make_fixed_width_column(data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED);
thrust::fill(rmm::exec_policy(cudf::get_default_stream()),
max_col->mutable_view().begin<double>(),
max_col->mutable_view().end<double>(),
tdigest.max);
std::vector<std::unique_ptr<column>> children;
children.push_back(std::move(list));
children.push_back(std::move(min_col));
children.push_back(std::move(max_col));
return make_structs_column(1, std::move(children), 0, {});
};
// build the individual digests
std::transform(groups.begin(), groups.end(), std::back_inserter(tdigests), make_digest);
// concatenate them
std::vector<column_view> views;
std::transform(tdigests.begin(),
tdigests.end(),
std::back_inserter(views),
[](std::unique_ptr<column> const& c) { return c->view(); });
return cudf::concatenate(views);
}
} // namespace test
} // namespace cudf
|
7d0cc0e2186e6d2116120a982a5d929263e478b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand_kernel.h> // CURAND lib header file
#define TRIALS_PER_THREAD 2048
#define BLOCKS 256
#define THREADS 256
#define PI 3.1415926535 // known value of pi
__global__ void pi_mc(float *estimate, hiprandState_t *states) {
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
int points_in_circle = 0;
float x, y;
// Initialize CURAND
hiprand_init(tid, 0, 0, &states[tid]);
for(int i = 0; i < TRIALS_PER_THREAD; i++) {
x = hiprand_uniform(&states[tid]);
y = hiprand_uniform(&states[tid]);
// count the point if (x, y) falls inside the unit circle.
points_in_circle += (x*x + y*y <= 1.0f);
}
estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD;
}
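/* Accuracy sketch (standard Monte Carlo error analysis, not from the original
   source): each thread reports 4*hits/TRIALS_PER_THREAD and main() averages
   the BLOCKS*THREADS per-thread estimates, so the effective sample count is
       N = BLOCKS * THREADS * TRIALS_PER_THREAD = 256*256*2048 ~ 1.34e8
   and the expected standard error is about
       sqrt(pi*(4 - pi)/N) ~ sqrt(2.70/1.34e8) ~ 1.4e-4.                     */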
int main(int argc, char *argv[]) {
float host[BLOCKS * THREADS];
float *dev;
hiprandState_t *devStates;
hipMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float));
hipMalloc( (void **)&devStates, BLOCKS*THREADS*sizeof(hiprandState_t) );
hipLaunchKernelGGL(( pi_mc), dim3(BLOCKS), dim3(THREADS), 0, 0, dev, devStates);
hipMemcpy(host, dev, BLOCKS * THREADS * sizeof(float),
hipMemcpyDeviceToHost);
float pi_gpu=0.0;
for(int i = 0; i < BLOCKS * THREADS; i++) pi_gpu += host[i];
pi_gpu /= (BLOCKS * THREADS);
printf("CUDA estimate of PI = %f [error of %f ]\n",
pi_gpu, pi_gpu - PI);
hipFree(dev);
hipFree(devStates);
return 0;
}
| 7d0cc0e2186e6d2116120a982a5d929263e478b1.cu | #include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h> // CURAND lib header file
#define TRIALS_PER_THREAD 2048
#define BLOCKS 256
#define THREADS 256
#define PI 3.1415926535 // known value of pi
__global__ void pi_mc(float *estimate, curandState *states) {
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
int points_in_circle = 0;
float x, y;
// Initialize CURAND
curand_init(tid, 0, 0, &states[tid]);
for(int i = 0; i < TRIALS_PER_THREAD; i++) {
x = curand_uniform(&states[tid]);
y = curand_uniform(&states[tid]);
// count the point if (x, y) falls inside the unit circle.
points_in_circle += (x*x + y*y <= 1.0f);
}
estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD;
}
int main(int argc, char *argv[]) {
float host[BLOCKS * THREADS];
float *dev;
curandState *devStates;
cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float));
cudaMalloc( (void **)&devStates, BLOCKS*THREADS*sizeof(curandState) );
pi_mc<<<BLOCKS, THREADS>>>(dev, devStates);
cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float),
cudaMemcpyDeviceToHost);
float pi_gpu=0.0;
for(int i = 0; i < BLOCKS * THREADS; i++) pi_gpu += host[i];
pi_gpu /= (BLOCKS * THREADS);
printf("CUDA estimate of PI = %f [error of %f ]\n",
pi_gpu, pi_gpu - PI);
cudaFree(dev);
cudaFree(devStates);
return 0;
}
|
a179ae60bb0cb77106f72b02a361eb4adca5efd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "split/device/detail/cu_raii.cuh"
SPLIT_DEVICE_NAMESPACE_BEGIN
namespace detail
{
namespace cu_raii
{
Stream::Stream()
{
status = hipStreamCreate(&handle);
}
Stream::~Stream()
{
join();
hipStreamDestroy(handle);
}
Stream::operator hipStream_t() const noexcept
{
return handle;
}
void Stream::join() noexcept
{
status = hipStreamSynchronize(handle);
}
namespace blas
{
Handle::Handle()
{
status = hipblasCreate(&handle);
}
Handle::~Handle()
{
hipblasDestroy(handle);
}
Handle::operator hipblasHandle_t() const noexcept
{
return handle;
}
}
namespace solver
{
SolverDn::SolverDn()
{
status = hipsolverDnCreate(&handle);
}
SolverDn::~SolverDn()
{
hipsolverDnDestroy(handle);
}
SolverDn::operator hipsolverDnHandle_t() const noexcept
{
return handle;
}
SolverSp::SolverSp()
{
status = cusolverSpCreate(&handle);
}
SolverSp::~SolverSp()
{
cusolverSpDestroy(handle);
}
SolverSp::operator cusolverSpHandle_t() const noexcept
{
return handle;
}
} // namespace solver
namespace sparse
{
Handle::Handle()
{
status = hipsparseCreate(&handle);
}
Handle::~Handle()
{
hipsparseDestroy(handle);
}
Handle::operator hipsparseHandle_t() const noexcept
{
return handle;
}
MatrixDescription::MatrixDescription()
{
hipsparseCreateMatDescr(&description);
}
MatrixDescription::MatrixDescription(hipsparseStatus_t* io_status)
{
*io_status = hipsparseCreateMatDescr(&description);
}
MatrixDescription::~MatrixDescription()
{
hipsparseDestroyMatDescr(description);
}
MatrixDescription::operator hipsparseMatDescr_t() const noexcept
{
return description;
}
} // namespace sparse
}
}
SPLIT_DEVICE_NAMESPACE_END
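// Illustrative RAII usage (a sketch under assumed names; the enclosing
// namespace macro is project-specific, and my_kernel, grid and block are
// hypothetical): each wrapper acquires its handle in the constructor and
// releases it when the scope closes, so callers only inspect the stored
// `status` field.
//
//   {
//     detail::cu_raii::Stream stream;                     // hipStreamCreate
//     if (stream.status != hipSuccess) { /* report and bail out */ }
//     hipLaunchKernelGGL(my_kernel, grid, block, 0, stream /*, args */);
//   }                               // join() then hipStreamDestroy run here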
| a179ae60bb0cb77106f72b02a361eb4adca5efd2.cu | #include "split/device/detail/cu_raii.cuh"
SPLIT_DEVICE_NAMESPACE_BEGIN
namespace detail
{
namespace cu_raii
{
Stream::Stream()
{
status = cudaStreamCreate(&handle);
}
Stream::~Stream()
{
join();
cudaStreamDestroy(handle);
}
Stream::operator cudaStream_t() const noexcept
{
return handle;
}
void Stream::join() noexcept
{
status = cudaStreamSynchronize(handle);
}
namespace blas
{
Handle::Handle()
{
status = cublasCreate(&handle);
}
Handle::~Handle()
{
cublasDestroy(handle);
}
Handle::operator cublasHandle_t() const noexcept
{
return handle;
}
}
namespace solver
{
SolverDn::SolverDn()
{
status = cusolverDnCreate(&handle);
}
SolverDn::~SolverDn()
{
cusolverDnDestroy(handle);
}
SolverDn::operator cusolverDnHandle_t() const noexcept
{
return handle;
}
SolverSp::SolverSp()
{
status = cusolverSpCreate(&handle);
}
SolverSp::~SolverSp()
{
cusolverSpDestroy(handle);
}
SolverSp::operator cusolverSpHandle_t() const noexcept
{
return handle;
}
} // namespace solver
namespace sparse
{
Handle::Handle()
{
status = cusparseCreate(&handle);
}
Handle::~Handle()
{
cusparseDestroy(handle);
}
Handle::operator cusparseHandle_t() const noexcept
{
return handle;
}
MatrixDescription::MatrixDescription()
{
cusparseCreateMatDescr(&description);
}
MatrixDescription::MatrixDescription(cusparseStatus_t* io_status)
{
*io_status = cusparseCreateMatDescr(&description);
}
MatrixDescription::~MatrixDescription()
{
cusparseDestroyMatDescr(description);
}
MatrixDescription::operator cusparseMatDescr_t() const noexcept
{
return description;
}
} // namespace sparse
}
}
SPLIT_DEVICE_NAMESPACE_END
|
9e3753348bb706f29a9d446e3d8e07adb2a0dbde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "particle.h"
#include <stdlib.h>
#include <stdio.h>
# include <iostream>
# include <vector>
# include <fstream>
# include <numeric>
# include <cmath>
# include <chrono>
# include "sparsematrix.h"
#include <vector>
const std::size_t Max_Iter = 50;
void print(const std::vector<double> &X);
std::vector<double> solve_SI(
const SparseMatrix &A,
double Epsilon);
std::vector<double> multiple(
SparseMatrix &matrix,
const std::vector<double> &vec);
double norm(const std::vector<double> &vec);
std::vector<double> plus(
const std::vector<double> &first,
const std::vector<double> &second);
std::vector<double> multiple(
const std::vector<double> &vec,
const double scalar);
void TransformMatrix(SparseMatrix &matrix);
//__global__ void advanceParticles(float dt, particle * pArray, int nParticles)
//{
// int idx = threadIdx.x + blockIdx.x*blockDim.x; // number thread
// if(idx < nParticles)
// {
// pArray[idx].advance(dt);
// }
//}
//__global__ void advanceParticles(particle * pArray, int nParticles, int nrows)
//{
// int idx = threadIdx.x + blockIdx.x*blockDim.x; // number thread
// if(idx < nParticles)
// {
// // pArray[idx].advance(dt);
// for(int j = 0; j < nrows; j++)
// {
// double x = pArray->matrixS.get(idx,j);
// double y = pArray->vecS[j];
// double r = pArray->Res[idx];
// double z = x*y;
// r = r+z;
// pArray->Res[idx] =r;
// }
// }
//}
__global__ void advanceParticles(particle * pArray, int nParticles, int nrows)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x; // number thread
if(idx < nParticles)
{
// pArray[idx].advance(dt);
for(int j = 0; j < nrows; j++)
{
            double x = pArray->matrixF[idx*nrows + j]; // row-major element (idx, j) of the flattened matrix
double y = pArray->vecF[j];
double r = pArray->vecR[idx];
double z = x*y;
r = r+z;
pArray->vecR[idx] = r;
}
}
}
std::vector<double> multiple(
SparseMatrix &matrix,
const std::vector<double> &vec)
{
std::cout << "multiple double - 0" << std::endl;
std::vector<double> Res(0);
hipError_t error;
std::cout << "multiple double - 1" << std::endl;
// int n = 1000000;
int n = vec.size();
// if(argc > 1) { n = atoi(argv[1]);} // Number of particles
// if(argc > 2) { srand(atoi(argv[2])); } // Random seed
std::cout << "multiple double - 2" << std::endl;
error = hipGetLastError();
std::cout << "multiple double - 3" << std::endl;
if (error != hipSuccess)
{
printf("0 %s\n",hipGetErrorString(error));
exit(1);
}
std::cout << "multiple void start" << std::endl;
    particle * pArray = new particle[1]; // one particle object; it is dereferenced below
pArray->multiple2(matrix, vec);
particle * devPArray = NULL;
    hipMalloc(&devPArray, sizeof(particle)); // space for one particle object, not just a pointer
hipDeviceSynchronize(); error = hipGetLastError();
if (error != hipSuccess)
{
printf("1 %s\n",hipGetErrorString(error));
exit(1);
}
    hipMemcpy(devPArray, pArray, sizeof(particle), hipMemcpyHostToDevice);
hipDeviceSynchronize(); error = hipGetLastError();
if (error != hipSuccess)
{
printf("2 %s\n",hipGetErrorString(error));
exit(1);
}
int bufN = vec.size();
int buf_nrows = matrix.m_nrows;
//for(int i=0; i<100; i++)
//{
// float dt = (float)rand()/(float) RAND_MAX; // Random distance each step
hipLaunchKernelGGL(( advanceParticles), dim3(1 + n/256), dim3(256), 0, 0, devPArray, bufN, buf_nrows);
error = hipGetLastError();
if (error != hipSuccess)
{
printf("3 %s\n",hipGetErrorString(error));
exit(1);
}
hipDeviceSynchronize();
//}
    hipMemcpy(pArray, devPArray, sizeof(particle), hipMemcpyDeviceToHost);
// v3 totalDistance(0,0,0);
// v3 temp;
// for(int i=0; i<n; i++)
// {
// temp = pArray[i].getTotalDistance();
// totalDistance.x += temp.x;
// totalDistance.y += temp.y;
// totalDistance.z += temp.z;
// }
// float avgX = totalDistance.x /(float)n;
// float avgY = totalDistance.y /(float)n;
// float avgZ = totalDistance.z /(float)n;
// float avgNorm = sqrt(avgX*avgX + avgY*avgY + avgZ*avgZ);
// printf( "Moved %d particles 100 steps. Average distance traveled is |(%f, %f, %f)| = %f\n",
// n, avgX, avgY, avgZ, avgNorm);
return pArray->resultVector();
}
int main(int argc, char ** argv)
{
auto matrix = SparseMatrix("a01.txt","b01.txt");
auto start = std::chrono::steady_clock::now();
auto X = solve_SI(matrix,1.0e-020);
auto end = std::chrono::steady_clock::now();
auto diff = end - start;
std::cout <<"Task takes "<< std::chrono::duration_cast<std::chrono::seconds>(diff).count() << " seconds\n";
std::ofstream out("out_SI.txt");
for(const auto &el : X)
out<<el<<'\n';
return 0;
}
// compute the norm
double norm(const std::vector<double> &vec)
{
return sqrt(std::accumulate(vec.begin(),vec.end(),0.0,[](double x,double y)
{
return x+y*y;
}));
}
// vector addition
std::vector<double> plus(
const std::vector<double> &first,
const std::vector<double> &second)
{
std::vector<double> r(first.size(),0);
std::transform(first.begin(),first.end(),second.begin(),r.begin(),[](const double &x,const double &y)
{
return x+y;
});
return r;
}
// multiply a vector by a scalar (element-wise)
std::vector<double> multiple(
const std::vector<double> &vec,
const double scalar)
{
std::vector<double> R(vec.size());
std::transform(vec.begin(),vec.end(),R.begin(),[&scalar](const double &x)
{
return x*scalar;
});
return R;
}
// transform the matrix to the required form
void TransformMatrix(SparseMatrix &matrix)
{
std::cout << "TransformMatrix" << std::endl;
auto n = matrix.m_nrows;
for(std::size_t i=0; i<n; i++)
{
auto el = matrix.get(i,i);
if(el>0)
{
matrix.set(1-el,i,i);
for(size_t j=0; j<n; j++)
if(i!=j)
matrix.set(-matrix.get(i,j),i,j);
}
else
{
matrix.set(1+el,i,i);
matrix.set(-matrix.get(i,n),i,n);
}
}
}
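// The math behind TransformMatrix, sketched with the names used below (M is
// the transformed matrix, B the right-hand-side column extracted in solve_SI):
//     A*x = b  ==>  x = (I - A)*x + b  ==>  x_{k+1} = M*x_k + B
// (rows with a negative diagonal are sign-flipped first). This fixed-point
// scheme converges when the spectral radius of M is below 1.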
// implementation of the simple-iteration method
std::vector<double> solve_SI(
const SparseMatrix &A,
double Epsilon)
{
std::size_t iter=1;
auto Matrix = A;
auto n = Matrix.m_nrows;
if(n == 0)
return std::vector<double>{};
auto B = Matrix.get_column(n);
TransformMatrix(Matrix);
std::cout << "start" << std::endl;
auto X = std::vector<double>(n,0);
while(iter<Max_Iter)
{
        // take a step
auto Xnew = plus(multiple(Matrix,X),B);
        // compute the norm of the increment (the difference between the 2 approximations)
double n2 = norm(plus(Xnew,multiple(X,-1)));
if(n2<Epsilon)
{
std::cout<<" :"<<iter<<'\n';
return X;
}
X = Xnew;
++iter;
}
std::cout<<" \n";
return X;
}
void print(const std::vector<double> &X)
{
for(const auto &el : X)
std::cout<<el<<' ';
std::cout<<'\n';
}
| 9e3753348bb706f29a9d446e3d8e07adb2a0dbde.cu | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "particle.h"
#include <stdlib.h>
#include <stdio.h>
# include <iostream>
# include <vector>
# include <fstream>
# include <numeric>
# include <cmath>
# include <chrono>
# include "sparsematrix.h"
#include <vector>
const std::size_t Max_Iter = 50;
void print(const std::vector<double> &X);
std::vector<double> solve_SI(
const SparseMatrix &A,
double Epsilon);
std::vector<double> multiple(
SparseMatrix &matrix,
const std::vector<double> &vec);
double norm(const std::vector<double> &vec);
std::vector<double> plus(
const std::vector<double> &first,
const std::vector<double> &second);
std::vector<double> multiple(
const std::vector<double> &vec,
const double scalar);
void TransformMatrix(SparseMatrix &matrix);
//__global__ void advanceParticles(float dt, particle * pArray, int nParticles)
//{
// int idx = threadIdx.x + blockIdx.x*blockDim.x; // number thread
// if(idx < nParticles)
// {
// pArray[idx].advance(dt);
// }
//}
//__global__ void advanceParticles(particle * pArray, int nParticles, int nrows)
//{
// int idx = threadIdx.x + blockIdx.x*blockDim.x; // number thread
// if(idx < nParticles)
// {
// // pArray[idx].advance(dt);
// for(int j = 0; j < nrows; j++)
// {
// double x = pArray->matrixS.get(idx,j);
// double y = pArray->vecS[j];
// double r = pArray->Res[idx];
// double z = x*y;
// r = r+z;
// pArray->Res[idx] =r;
// }
// }
//}
__global__ void advanceParticles(particle * pArray, int nParticles, int nrows)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x; // number thread
if(idx < nParticles)
{
// pArray[idx].advance(dt);
for(int j = 0; j < nrows; j++)
{
            double x = pArray->matrixF[idx*nrows + j]; // row-major element (idx, j) of the flattened matrix
double y = pArray->vecF[j];
double r = pArray->vecR[idx];
double z = x*y;
r = r+z;
pArray->vecR[idx] = r;
}
}
}
std::vector<double> multiple(
SparseMatrix &matrix,
const std::vector<double> &vec)
{
std::cout << "multiple double - 0" << std::endl;
std::vector<double> Res(0);
cudaError_t error;
std::cout << "multiple double - 1" << std::endl;
// int n = 1000000;
int n = vec.size();
// if(argc > 1) { n = atoi(argv[1]);} // Number of particles
// if(argc > 2) { srand(atoi(argv[2])); } // Random seed
std::cout << "multiple double - 2" << std::endl;
error = cudaGetLastError();
std::cout << "multiple double - 3" << std::endl;
if (error != cudaSuccess)
{
printf("0 %s\n",cudaGetErrorString(error));
exit(1);
}
std::cout << "multiple void start" << std::endl;
    particle * pArray = new particle[1]; // one particle object; it is dereferenced below
pArray->multiple2(matrix, vec);
particle * devPArray = NULL;
    cudaMalloc(&devPArray, sizeof(particle)); // space for one particle object, not just a pointer
cudaDeviceSynchronize(); error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("1 %s\n",cudaGetErrorString(error));
exit(1);
}
    cudaMemcpy(devPArray, pArray, sizeof(particle), cudaMemcpyHostToDevice);
cudaDeviceSynchronize(); error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("2 %s\n",cudaGetErrorString(error));
exit(1);
}
int bufN = vec.size();
int buf_nrows = matrix.m_nrows;
//for(int i=0; i<100; i++)
//{
// float dt = (float)rand()/(float) RAND_MAX; // Random distance each step
advanceParticles<<< 1 + n/256, 256>>>(devPArray, bufN, buf_nrows);
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("3 %s\n",cudaGetErrorString(error));
exit(1);
}
cudaDeviceSynchronize();
//}
    cudaMemcpy(pArray, devPArray, sizeof(particle), cudaMemcpyDeviceToHost);
// v3 totalDistance(0,0,0);
// v3 temp;
// for(int i=0; i<n; i++)
// {
// temp = pArray[i].getTotalDistance();
// totalDistance.x += temp.x;
// totalDistance.y += temp.y;
// totalDistance.z += temp.z;
// }
// float avgX = totalDistance.x /(float)n;
// float avgY = totalDistance.y /(float)n;
// float avgZ = totalDistance.z /(float)n;
// float avgNorm = sqrt(avgX*avgX + avgY*avgY + avgZ*avgZ);
// printf( "Moved %d particles 100 steps. Average distance traveled is |(%f, %f, %f)| = %f\n",
// n, avgX, avgY, avgZ, avgNorm);
return pArray->resultVector();
}
int main(int argc, char ** argv)
{
auto matrix = SparseMatrix("a01.txt","b01.txt");
auto start = std::chrono::steady_clock::now();
auto X = solve_SI(matrix,1.0e-020);
auto end = std::chrono::steady_clock::now();
auto diff = end - start;
std::cout <<"Task takes "<< std::chrono::duration_cast<std::chrono::seconds>(diff).count() << " seconds\n";
std::ofstream out("out_SI.txt");
for(const auto &el : X)
out<<el<<'\n';
return 0;
}
// compute the norm
double norm(const std::vector<double> &vec)
{
return sqrt(std::accumulate(vec.begin(),vec.end(),0.0,[](double x,double y)
{
return x+y*y;
}));
}
// vector addition
std::vector<double> plus(
const std::vector<double> &first,
const std::vector<double> &second)
{
std::vector<double> r(first.size(),0);
std::transform(first.begin(),first.end(),second.begin(),r.begin(),[](const double &x,const double &y)
{
return x+y;
});
return r;
}
// multiply a vector by a scalar (element-wise)
std::vector<double> multiple(
const std::vector<double> &vec,
const double scalar)
{
std::vector<double> R(vec.size());
std::transform(vec.begin(),vec.end(),R.begin(),[&scalar](const double &x)
{
return x*scalar;
});
return R;
}
// transform the matrix to the required form
void TransformMatrix(SparseMatrix &matrix)
{
std::cout << "TransformMatrix" << std::endl;
auto n = matrix.m_nrows;
for(std::size_t i=0; i<n; i++)
{
auto el = matrix.get(i,i);
if(el>0)
{
matrix.set(1-el,i,i);
for(size_t j=0; j<n; j++)
if(i!=j)
matrix.set(-matrix.get(i,j),i,j);
}
else
{
matrix.set(1+el,i,i);
matrix.set(-matrix.get(i,n),i,n);
}
}
}
//реализация метода
std::vector<double> solve_SI(
const SparseMatrix &A,
double Epsilon)
{
std::size_t iter=1;
auto Matrix = A;
auto n = Matrix.m_nrows;
if(n == 0)
return std::vector<double>{};
auto B = Matrix.get_column(n);
TransformMatrix(Matrix);
std::cout << "start" << std::endl;
auto X = std::vector<double>(n,0);
while(iter<Max_Iter)
{
        // take a step
auto Xnew = plus(multiple(Matrix,X),B);
        // compute the norm of the increment (the difference between the 2 approximations)
double n2 = norm(plus(Xnew,multiple(X,-1)));
if(n2<Epsilon)
{
std::cout<<"Количество итераций:"<<iter<<'\n';
return X;
}
X = Xnew;
++iter;
}
std::cout<<"Метод расходится\n";
return X;
}
void print(const std::vector<double> &X)
{
for(const auto &el : X)
std::cout<<el<<' ';
std::cout<<'\n';
}
|
c1489b882df47deca78ea7f8274c7236aec8a39e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include "../box_iou_rotated/box_iou_rotated_utils.h"
using namespace detectron2;
namespace {
int const threadsPerBlock = sizeof(unsigned long long) * 8;
}
template <typename T>
__global__ void nms_rotated_cuda_kernel(
const int n_boxes,
const float iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
// nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
// Compared to nms_cuda_kernel, where each box is represented with 4 values
// (x1, y1, x2, y2), each rotated box is represented with 5 values
// (x_center, y_center, width, height, angle_degrees) here.
__shared__ T block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
// Instead of devIoU used by original horizontal nms, here
// we use the single_box_iou_rotated function from box_iou_rotated_utils.h
if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) >
iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
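// Mask layout sketch (inferred from the kernel above; i, j and suppressed are
// illustrative names): box i owns col_blocks 64-bit words, and bit (j % 64)
// of word (j / 64) is set when IoU(box i, box j) exceeds iou_threshold:
//
//   unsigned long long word = dev_mask[i * col_blocks + (j >> 6)];
//   bool suppressed         = (word >> (j & 63)) & 1ULL;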
namespace detectron2 {
at::Tensor nms_rotated_cuda(
// input must be contiguous
const at::Tensor& dets,
const at::Tensor& scores,
float iou_threshold) {
// using scalar_t = float;
AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");
at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device());
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
auto dets_num = dets.size(0);
const int col_blocks =
at::cuda::ATenCeilDiv(static_cast<int>(dets_num), threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES(
dets_sorted.type(), "nms_rotated_kernel_cuda", [&] {
hipLaunchKernelGGL(( nms_rotated_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dets_num,
iou_threshold,
dets_sorted.data<scalar_t>(),
(unsigned long long*)mask.data<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(hipGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace detectron2
| c1489b882df47deca78ea7f8274c7236aec8a39e.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include "../box_iou_rotated/box_iou_rotated_utils.h"
using namespace detectron2;
namespace {
int const threadsPerBlock = sizeof(unsigned long long) * 8;
}
template <typename T>
__global__ void nms_rotated_cuda_kernel(
const int n_boxes,
const float iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
// nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
// Compared to nms_cuda_kernel, where each box is represented with 4 values
// (x1, y1, x2, y2), each rotated box is represented with 5 values
// (x_center, y_center, width, height, angle_degrees) here.
__shared__ T block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
// Instead of devIoU used by original horizontal nms, here
// we use the single_box_iou_rotated function from box_iou_rotated_utils.h
if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) >
iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
namespace detectron2 {
at::Tensor nms_rotated_cuda(
// input must be contiguous
const at::Tensor& dets,
const at::Tensor& scores,
float iou_threshold) {
// using scalar_t = float;
AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");
at::cuda::CUDAGuard device_guard(dets.device());
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
auto dets_num = dets.size(0);
const int col_blocks =
at::cuda::ATenCeilDiv(static_cast<int>(dets_num), threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES(
dets_sorted.type(), "nms_rotated_kernel_cuda", [&] {
nms_rotated_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
dets_num,
iou_threshold,
dets_sorted.data<scalar_t>(),
(unsigned long long*)mask.data<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(cudaGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace detectron2
|
acc830c1643cf858e93b63130c05d999a4a7147f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/native/hip/Resize.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/NativeFunctions.h>
#include <c10/util/accumulate.h>
#include <c10/util/Exception.h>
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace at {
namespace native {
Tensor& eye_out_cuda(int64_t n, Tensor& result) {
// the default value of `m` equals to `n`
return at::native::eye_out_cuda(n, n, result);
}
Tensor& eye_out_cuda(int64_t n, int64_t m, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
TORCH_CHECK(m >= 0, "m must be greater or equal to 0, got ", m);
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt) {
AT_ASSERT(device_or_default(device_opt).is_cuda());
TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = c10::multiply_integers(size);
auto dtype = dtype_or_default(dtype_opt);
auto dtype_meta = scalarTypeToTypeMeta(dtype);
int64_t size_bytes = nelements * dtype_meta.itemsize();
auto storage_impl = c10::make_intrusive<StorageImpl>(
c10::StorageImpl::use_byte_size_t(),
size_bytes,
allocator->allocate(size_bytes),
allocator,
/*resizeable=*/true);
auto tensor =
detail::make_tensor<TensorImpl>(storage_impl, DispatchKey::CUDA, dtype_meta);
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
auto memory_format = memory_format_opt.value_or(MemoryFormat::Contiguous);
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
auto t = at::native::empty_cuda({0}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be within the range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (though it may equal) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
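// A minimal host-side sketch of the same verify-and-correct idea, reduced to a
// plain floor(sqrt(n)): take the double-precision candidate, check it exactly
// in integer arithmetic, and nudge it when it is off. Illustrative only; it
// assumes n >= 0 and that (r + 1) * (r + 1) still fits in an int64_t.
inline int64_t floor_sqrt_sketch(int64_t n) {
int64_t r = (int64_t)std::sqrt((double)n);
while (r > 0 && r * r > n) --r; // candidate overshot the true root
while ((r + 1) * (r + 1) <= n) ++r; // candidate undershot the true root
return r; // exact floor(sqrt(n))
}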
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
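// Worked example of the mapping above: a 3x3 tril with offset 0 has rows of
// 1, 2, 3 elements, so f = 1. For linear index x = 4:
// b = 2f - 1 = 1, 4c = -8x = -32, sqrt(b*b - 4c) = sqrt(33) ~= 5.745
// row = floor((-1 + 5.745) / 2) = 2
// col = 4 - (2f + row - 1) * row / 2 = 4 - 3 = 1
// so x = 4 lands at coordinate (2, 1), the second entry of the bottom row.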
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is
//    >= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
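// Worked example of the mapping above: a 3x3 triu with offset 0 is a single
// trapezoid with rows of 3, 2, 1 elements, so f = 3. For linear index x = 4:
// b = -1 - 2f = -7, 4c = 8x = 32, sqrt(b*b - 4c) = sqrt(17) ~= 4.123
// row = floor((7 - 4.123) / 2) = 1
// col = 4 - (2f - row + 1) * row / 2 + row = 4 - 3 + 1 = 2
// so x = 4 lands at coordinate (1, 2), the fifth entry of the upper triangle.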
} // namespace
template <typename scalar_t>
__global__
#if defined(USE_ROCM)
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
hipLaunchKernelGGL(( tril_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
hipLaunchKernelGGL(( triu_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
}} // namespace at::native
| acc830c1643cf858e93b63130c05d999a4a7147f.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/native/cuda/Resize.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/NativeFunctions.h>
#include <c10/util/accumulate.h>
#include <c10/util/Exception.h>
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace at {
namespace native {
Tensor& eye_out_cuda(int64_t n, Tensor& result) {
// the default value of `m` equals `n`
return at::native::eye_out_cuda(n, n, result);
}
Tensor& eye_out_cuda(int64_t n, int64_t m, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be greater than or equal to 0, got ", n);
TORCH_CHECK(m >= 0, "m must be greater than or equal to 0, got ", m);
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt) {
AT_ASSERT(device_or_default(device_opt).is_cuda());
TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = c10::multiply_integers(size);
auto dtype = dtype_or_default(dtype_opt);
auto dtype_meta = scalarTypeToTypeMeta(dtype);
int64_t size_bytes = nelements * dtype_meta.itemsize();
auto storage_impl = c10::make_intrusive<StorageImpl>(
c10::StorageImpl::use_byte_size_t(),
size_bytes,
allocator->allocate(size_bytes),
allocator,
/*resizeable=*/true);
auto tensor =
detail::make_tensor<TensorImpl>(storage_impl, DispatchKey::CUDA, dtype_meta);
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
auto memory_format = memory_format_opt.value_or(MemoryFormat::Contiguous);
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
auto t = at::native::empty_cuda({0}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is overkill for most cases, where double's precision suffices.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits of precision.
//
// The following solution uses sqrt directly for most cases, and falls back to
// special handling only if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be within the range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (though it may equal) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
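// A minimal host-side sketch of the same verify-and-correct idea, reduced to a
// plain floor(sqrt(n)): take the double-precision candidate, check it exactly
// in integer arithmetic, and nudge it when it is off. Illustrative only; it
// assumes n >= 0 and that (r + 1) * (r + 1) still fits in an int64_t.
inline int64_t floor_sqrt_sketch(int64_t n) {
int64_t r = (int64_t)std::sqrt((double)n);
while (r > 0 && r * r > n) --r; // candidate overshot the true root
while ((r + 1) * (r + 1) <= n) ++r; // candidate undershot the true root
return r; // exact floor(sqrt(n))
}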
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
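// Worked example of the mapping above: a 3x3 tril with offset 0 has rows of
// 1, 2, 3 elements, so f = 1. For linear index x = 4:
// b = 2f - 1 = 1, 4c = -8x = -32, sqrt(b*b - 4c) = sqrt(33) ~= 5.745
// row = floor((-1 + 5.745) / 2) = 2
// col = 4 - (2f + row - 1) * row / 2 = 4 - 3 = 1
// so x = 4 lands at coordinate (2, 1), the second entry of the bottom row.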
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is
//    >= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
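// Worked example of the mapping above: a 3x3 triu with offset 0 is a single
// trapezoid with rows of 3, 2, 1 elements, so f = 3. For linear index x = 4:
// b = -1 - 2f = -7, 4c = 8x = 32, sqrt(b*b - 4c) = sqrt(17) ~= 4.123
// row = floor((7 - 4.123) / 2) = 1
// col = 4 - (2f - row + 1) * row / 2 + row = 4 - 3 + 1 = 2
// so x = 4 lands at coordinate (1, 2), the fifth entry of the upper triangle.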
} // namespace
template <typename scalar_t>
__global__
#if defined(USE_ROCM)
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
tril_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
triu_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
}} // namespace at::native
|
423d68dd4756f54f7867a4a4f15d397efe5f4471.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define PI 3.141592653589793238462643
#define blocDim 256
#define powOfTwo 4
#define timerCount 10
#define min(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
__global__ void guts(const long int N, const long int M,const double deltaX,const double * x,double * b)
{
__shared__ double thisBlock[blocDim];
__shared__ long int k,l,blockPos,start;//,fullNeeded,needed;
double FuncTemp = 0;
long int threadPos;
if(threadIdx.x==0){
k = blockIdx.x+2;
l = blockIdx.y;
start = l*(N+1);
blockPos = k+(N+1)*l;
//fullNeeded = k+1 + (k+1)%2;
//needed = min(fullNeeded,blocDim);
}
thisBlock[threadIdx.x] = 0;
__syncthreads();
threadPos = threadIdx.x;
while(threadPos<=k){
if(threadPos==0){
FuncTemp += x[start+threadPos];
}else if(threadPos==1){
if(k==2){
FuncTemp += deltaX*x[start+threadPos]/4.0;
}else{
FuncTemp += deltaX*x[start+threadPos]/2.0;
}
}else if(threadPos<(k-1)){
FuncTemp += deltaX*x[start+threadPos];
}else if(threadPos==(k-1)){
FuncTemp += 3.0/4.0*deltaX*x[start+threadPos];
}else if(threadPos==k){
FuncTemp += deltaX*x[start+threadPos]/4.0;
}else{
FuncTemp += 0;
}
threadPos += blockDim.x;
}
thisBlock[threadIdx.x] = FuncTemp;
__syncthreads();
for(int i=blocDim/2;i>0;i=i/2){
if(threadIdx.x<i){
thisBlock[threadIdx.x] += thisBlock[threadIdx.x+i];
}
__syncthreads();
}
if(threadIdx.x==0){
b[blockPos] = thisBlock[0];
}
}
| 423d68dd4756f54f7867a4a4f15d397efe5f4471.cu | #define PI 3.141592653589793238462643
#define blocDim 256
#define powOfTwo 4
#define timerCount 10
#define min(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
__global__ void guts(const long int N, const long int M,const double deltaX,const double * x,double * b)
{
__shared__ double thisBlock[blocDim];
__shared__ long int k,l,blockPos,start;//,fullNeeded,needed;
double FuncTemp = 0;
long int threadPos;
if(threadIdx.x==0){
k = blockIdx.x+2;
l = blockIdx.y;
start = l*(N+1);
blockPos = k+(N+1)*l;
//fullNeeded = k+1 + (k+1)%2;
//needed = min(fullNeeded,blocDim);
}
thisBlock[threadIdx.x] = 0;
__syncthreads();
threadPos = threadIdx.x;
while(threadPos<=k){
if(threadPos==0){
FuncTemp += x[start+threadPos];
}else if(threadPos==1){
if(k==2){
FuncTemp += deltaX*x[start+threadPos]/4.0;
}else{
FuncTemp += deltaX*x[start+threadPos]/2.0;
}
}else if(threadPos<(k-1)){
FuncTemp += deltaX*x[start+threadPos];
}else if(threadPos==(k-1)){
FuncTemp += 3.0/4.0*deltaX*x[start+threadPos];
}else if(threadPos==k){
FuncTemp += deltaX*x[start+threadPos]/4.0;
}else{
FuncTemp += 0;
}
threadPos += blockDim.x;
}
thisBlock[threadIdx.x] = FuncTemp;
__syncthreads();
for(int i=blocDim/2;i>0;i=i/2){
if(threadIdx.x<i){
thisBlock[threadIdx.x] += thisBlock[threadIdx.x+i];
}
__syncthreads();
}
if(threadIdx.x==0){
b[blockPos] = thisBlock[0];
}
}
|
34e82278c597f7252547824e584a2c40d3347420.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/backprop.h"
#include <helper_cuda.h>
int main(int argc, char **argv){
printf("testing the outer layer using back propagation\n");
uint8_t *images = get_data("train-images.idx3-ubyte");
uint8_t *labels = get_data("train-labels.idx1-ubyte");
//allocate space on the host
unsigned int size_weights = sizeof(float) * OUTPUT_LAYER_NEURONS*MAX_NUM_WEIGHTS;
float *h_weights = (float*)malloc(size_weights);
unsigned int size_neurons = sizeof(float) * OUTPUT_LAYER_NEURONS;
float *h_layer_input = (float*)malloc(size_neurons);
float *h_actual_output = (float*)malloc(size_neurons);
float *h_desired_output = (float*)malloc(size_neurons);
float *h_error_prev = (float*)malloc(size_neurons);
if(!h_error_prev or !h_layer_input or !h_actual_output or !h_weights or !h_desired_output){
printf("unable to create host pointer\n");
return 1;
}
//init the data for the function call
for(int i = 0; i < OUTPUT_LAYER_NEURONS; i++){
h_layer_input[i] = 0.5;
h_desired_output[i] = 0;
h_actual_output[i] = 0;
}
h_actual_output[0] = 0.8;
h_actual_output[1] = 0.8;
printf("testing with character %i \n", (int)labels[0]);
h_desired_output[(int)labels[0]] = 0.8;
for(int i = 0; i < OUTPUT_LAYER_NEURONS*MAX_NUM_WEIGHTS; i++){
h_weights[i] = 0.001f;
}
//allocate space on the device
float *d_weights;
hipMalloc((void**)&d_weights, size_weights);
float *d_layer_input;
hipMalloc((void **)&d_layer_input, size_neurons);
float *d_desired_output;
hipMalloc((void **)&d_desired_output, size_neurons);
float *d_actual_output;
hipMalloc((void **)&d_actual_output, size_neurons);
float *d_error_prev;
hipMalloc((void **)&d_error_prev, size_neurons);
hipError_t error;
//copy from cpu(host) to the gpu(device)
error = hipMemcpy(d_weights, h_weights, size_weights, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_layer_input, h_layer_input, size_neurons, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_desired_output, h_desired_output, size_neurons, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_actual_output, h_actual_output, size_neurons, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
for(int i = 0; i < OUTPUT_LAYER_NEURONS; i++){
h_error_prev[i] = 0;
}
error = hipMemcpy(d_error_prev, h_error_prev, size_neurons, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
hipDeviceProp_t deviceProp;
for (device = 0; device < deviceCount; ++device) {
hipGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n", device, deviceProp.major, deviceProp.minor);
}
if(deviceProp.major == 1){
hipLaunchKernelGGL(( backprop_output_layer), dim3(512), dim3(512), 0, 0, MAX_NUM_WEIGHTS, OUTPUT_LAYER_NEURONS, d_layer_input, d_actual_output, d_desired_output,
d_weights, d_error_prev);
}
else{
//eval_layer<<<(int)MAX_NUM_NEURONS, (int)MAX_NUM_WEIGHTS>>>(d_input, d_weights, d_output);
}
error = hipGetLastError();
printf("running eval_layer returned error code %s, line(%d)\n", hipGetErrorString(error), __LINE__);
//read back the output values from the layer
hipMemcpy(h_error_prev, d_error_prev, size_neurons, hipMemcpyDeviceToHost);
hipMemcpy(h_weights, d_weights, size_weights, hipMemcpyDeviceToHost);
for(int i = 0; i < OUTPUT_LAYER_NEURONS; i++){
printf("error_prev: %f\n" , h_error_prev[i]);
}
for(int i = 0; i < 10; i++){
printf("weights: %f\n" , h_weights[i]);
}
hipFree(d_actual_output);hipFree(d_layer_input);hipFree(d_desired_output);hipFree(d_weights);
}
| 34e82278c597f7252547824e584a2c40d3347420.cu | #include "../include/backprop.h"
#include <helper_cuda.h>
int main(int argc, char **argv){
printf("testing the outer layer using back propagation\n");
uint8_t *images = get_data("train-images.idx3-ubyte");
uint8_t *labels = get_data("train-labels.idx1-ubyte");
//allocate space on the host
unsigned int size_weights = sizeof(float) * OUTPUT_LAYER_NEURONS*MAX_NUM_WEIGHTS;
float *h_weights = (float*)malloc(size_weights);
unsigned int size_neurons = sizeof(float) * OUTPUT_LAYER_NEURONS;
float *h_layer_input = (float*)malloc(size_neurons);
float *h_actual_output = (float*)malloc(size_neurons);
float *h_desired_output = (float*)malloc(size_neurons);
float *h_error_prev = (float*)malloc(size_neurons);
if(!h_error_prev or !h_layer_input or !h_actual_output or !h_weights or !h_desired_output){
printf("unable to create host pointer\n");
return 1;
}
//init the data for the function call
for(int i = 0; i < OUTPUT_LAYER_NEURONS; i++){
h_layer_input[i] = 0.5;
h_desired_output[i] = 0;
h_actual_output[i] = 0;
}
h_actual_output[0] = 0.8;
h_actual_output[1] = 0.8;
printf("testing with character %i \n", (int)labels[0]);
h_desired_output[(int)labels[0]] = 0.8;
for(int i = 0; i < OUTPUT_LAYER_NEURONS*MAX_NUM_WEIGHTS; i++){
h_weights[i] = 0.001f;
}
//allocate space on the device
float *d_weights;
cudaMalloc((void**)&d_weights, size_weights);
float *d_layer_input;
cudaMalloc((void **)&d_layer_input, size_neurons);
float *d_desired_output;
cudaMalloc((void **)&d_desired_output, size_neurons);
float *d_actual_output;
cudaMalloc((void **)&d_actual_output, size_neurons);
float *d_error_prev;
cudaMalloc((void **)&d_error_prev, size_neurons);
cudaError_t error;
//copy from cpu(host) to the gpu(device)
error = cudaMemcpy(d_weights, h_weights, size_weights, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_layer_input, h_layer_input, size_neurons, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_desired_output, h_desired_output, size_neurons, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_actual_output, h_actual_output, size_neurons, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
for(int i = 0; i < OUTPUT_LAYER_NEURONS; i++){
h_error_prev[i] = 0;
}
error = cudaMemcpy(d_error_prev, h_error_prev, size_neurons, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
cudaDeviceProp deviceProp;
for (device = 0; device < deviceCount; ++device) {
cudaGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n", device, deviceProp.major, deviceProp.minor);
}
if(deviceProp.major == 1){
backprop_output_layer<<<512, 512>>>(MAX_NUM_WEIGHTS, OUTPUT_LAYER_NEURONS, d_layer_input, d_actual_output, d_desired_output,
d_weights, d_error_prev);
}
else{
//eval_layer<<<(int)MAX_NUM_NEURONS, (int)MAX_NUM_WEIGHTS>>>(d_input, d_weights, d_output);
}
error = cudaGetLastError();
printf("running eval_layer returned error code %s, line(%d)\n", cudaGetErrorString(error), __LINE__);
//read back the output values from the layer
cudaMemcpy(h_error_prev, d_error_prev, size_neurons, cudaMemcpyDeviceToHost);
cudaMemcpy(h_weights, d_weights, size_weights, cudaMemcpyDeviceToHost);
for(int i = 0; i < OUTPUT_LAYER_NEURONS; i++){
printf("error_prev: %f\n" , h_error_prev[i]);
}
for(int i = 0; i < 10; i++){
printf("weights: %f\n" , h_weights[i]);
}
cudaFree(d_actual_output);cudaFree(d_layer_input);cudaFree(d_desired_output);cudaFree(d_weights);
}
|
b147c150c9f2dd4a560c63dbe3d58de0a1989797.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
const char modified_bessel_i0_name[] = "modified_bessel_i0_forward";
void modified_bessel_i0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
jitted_gpu_kernel<modified_bessel_i0_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_i0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_i0_stub, &modified_bessel_i0_kernel_cuda);
} // namespace at::native
| b147c150c9f2dd4a560c63dbe3d58de0a1989797.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
const char modified_bessel_i0_name[] = "modified_bessel_i0_forward";
void modified_bessel_i0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
jitted_gpu_kernel<modified_bessel_i0_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_i0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_i0_stub, &modified_bessel_i0_kernel_cuda);
} // namespace at::native
|
8612abefa8bb941809bd51f511fb20fdd314e824.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bp_gpu.cuh"
__global__ void bp_update_fc(int idx)
{
int i,j,k,l,m,n;
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
__shared__ float _fc2_delta[FC2_SIZE];
__shared__ float _fc1_delta_t[FC1_SIZE];
if(i<FC2_SIZE&&j==0)
{
_fc2_delta[i]=alpha*_C[idx%N_STREAM][i]*(_fc2_a[idx%N_STREAM][i]*(1.0-_fc2_a[idx%N_STREAM][i]));
_fc2_db[idx%N_STREAM][i]+=_fc2_delta[i];
}
__syncthreads();
if(i<FC2_SIZE&&j<FC1_SIZE)
_fc2_dw[idx%N_STREAM][i][j]+=_fc2_delta[i]*_fc1_a[idx%N_STREAM][j];
j=threadIdx.x+blockDim.x*blockIdx.x;
i=threadIdx.y+blockDim.y*blockIdx.y;
__syncthreads();
if(i<FC1_SIZE&&j==0)
{
float error=0;
for(j=0;j<FC2_SIZE;j++)
error+=_fc2_delta[j]*fc2_w[j][i];
_fc1_delta_t[i]=error*(_fc1_a[idx%N_STREAM][i]*(1.0-_fc1_a[idx%N_STREAM][i]));
// _fc1_delta_t[i]=error*(1.0-_fc1_a[idx%N_STREAM][i]*_fc1_a[idx%N_STREAM][i]);
_fc1_db[idx%N_STREAM][i]+=_fc1_delta_t[i];
_fc1_delta[idx%N_STREAM][i]=_fc1_delta_t[i];
}
j=threadIdx.x+blockDim.x*blockIdx.x;
i=threadIdx.y+blockDim.y*blockIdx.y;
__syncthreads();
if(i<FC1_SIZE&&j<CONV_W_NUM)
{
for(k=0;k<POOL_SIZE;k++)
for(l=0;l<POOL_SIZE;l++)
_fc1_dw[idx%N_STREAM][i][j][k][l]+=_fc1_delta_t[i]*_pool[idx%N_STREAM][j][k][l];
__syncthreads();
}
}
__global__ void bp_update_conv(int idx)
{
int i,j,k,l,m,n;
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
k=threadIdx.z+blockDim.z*blockIdx.z;
__shared__ float _conv_sigma_delta[CONV_W_NUM];
__shared__ float _conv_delta[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
if(i<CONV_W_NUM&&j<POOL_SIZE&&k<POOL_SIZE)
{
float error=0;
_conv_delta[i][j][k]=0;
for(l=0;l<FC1_SIZE;l++)
error+=_fc1_delta[idx%N_STREAM][l]*fc1_w[l][i][j][k];
// _conv_delta[i][j][k]=error*(_pool[idx%N_STREAM][i][j][k]*(1.0-_pool[idx%N_STREAM][i][j][k]));
_conv_delta[i][j][k]=error*(1.0-_pool[idx%N_STREAM][i][j][k]*_pool[idx%N_STREAM][i][j][k]);
__syncthreads();
}
if(i<CONV_W_NUM&&j==0&&k==0)
{
_conv_sigma_delta[i]=0;
for(j=0;j<POOL_SIZE;j++)
for(k=0;k<POOL_SIZE;k++)
_conv_sigma_delta[i]+=_conv_delta[i][j][k];
_conv_db[idx%N_STREAM][i]+=_conv_sigma_delta[i];
}
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
k=threadIdx.z+blockDim.z*blockIdx.z;
__syncthreads();
if(i<CONV_W_NUM&&j<CONV_W_SIZE&&k<CONV_W_SIZE)
{
float error=0;
for(m=0;m<POOL_SIZE;m++)
for(n=0;n<POOL_SIZE;n++)
{
int x=_pool_pos[idx%N_STREAM][i][m][n]/2;
int y=_pool_pos[idx%N_STREAM][i][m][n]%2;
error+=_conv_delta[i][m][n]*_input[idx%N_STREAM][2*m+j+x][2*n+k+y];
}
_conv_dw[idx%N_STREAM][i][j][k]+=error;
__syncthreads();
}
}
void bp_update_gpu(int idx)
{
dim3 block1(16,64);
dim3 grid1(1,1);
dim3 block2(6,12,12);
dim3 grid2(1,1,1);
hipLaunchKernelGGL(( bp_update_fc), dim3(grid1),dim3(block1),0,stream[idx%N_STREAM], idx);
hipLaunchKernelGGL(( bp_update_conv), dim3(grid2),dim3(block2),0,stream[idx%N_STREAM], idx);
}
__global__ void bp_assign_grads_fc(int idx,int minibatch)
{
int i,j,k,l,m,n;
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC2_SIZE&&j==0)
{
for(j=0;j<minibatch;j++)
fc2_b[i]-=(_fc2_db[(idx-j)%N_STREAM][i]/minibatch);
for(j=0;j<minibatch;j++)
_fc2_db[(idx-j)%N_STREAM][i]=0;
}
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC2_SIZE&&j<FC1_SIZE)
{
for(k=0;k<minibatch;k++)
fc2_w[i][j]-=(_fc2_dw[(idx-k)%N_STREAM][i][j]/minibatch);
for(k=0;k<minibatch;k++)
_fc2_dw[(idx-k)%N_STREAM][i][j]=0;
}
j=threadIdx.x+blockDim.x*blockIdx.x;
i=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC1_SIZE&&j==0)
{
for(j=0;j<minibatch;j++)
fc1_b[i]-=(_fc1_db[(idx-j)%N_STREAM][i]/minibatch);
for(j=0;j<minibatch;j++)
_fc1_db[(idx-j)%N_STREAM][i]=0;
}
j=threadIdx.x+blockDim.x*blockIdx.x;
i=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC1_SIZE&&j<CONV_W_NUM)
{
for(k=0;k<POOL_SIZE;k++)
for(l=0;l<POOL_SIZE;l++)
{
for(int m=0;m<minibatch;m++)
fc1_w[i][j][k][l]-=(_fc1_dw[(idx-m)%N_STREAM][i][j][k][l]/minibatch);
for(int m=0;m<minibatch;m++)
_fc1_dw[(idx-m)%N_STREAM][i][j][k][l]=0;
}
}
}
__global__ void bp_assign_grads_conv(int idx,int minibatch)
{
int i,j,k,l,m,n;
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
k=threadIdx.z+blockDim.z*blockIdx.z;
if(i<CONV_W_NUM&&j==0&&k==0)
{
for(j=0;j<minibatch;j++)
conv_b[i]-=(_conv_db[(idx-j)%N_STREAM][i]/minibatch);
for(j=0;j<minibatch;j++)
_conv_db[(idx-j)%N_STREAM][i]=0;
}
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
k=threadIdx.z+blockDim.z*blockIdx.z;
if(i<CONV_W_NUM&&j<CONV_W_SIZE&&k<CONV_W_SIZE)
{
for(l=0;l<minibatch;l++)
conv_w[i][j][k]-=(_conv_dw[(idx-l)%N_STREAM][i][j][k]/minibatch);
for(l=0;l<minibatch;l++)
_conv_dw[(idx-l)%N_STREAM][i][j][k]=0;
}
}
void bp_assign_grads_gpu(int idx)
{
dim3 block1(16,64);
dim3 grid1(1,1);
dim3 block2(6,12,12);
dim3 grid2(1,1,1);
hipLaunchKernelGGL(( bp_assign_grads_fc), dim3(grid1),dim3(block1),0,stream[idx%N_STREAM], idx,minibatch);
hipLaunchKernelGGL(( bp_assign_grads_conv), dim3(grid2),dim3(block2),0,stream[idx%N_STREAM], idx,minibatch);
} | 8612abefa8bb941809bd51f511fb20fdd314e824.cu | #include "bp_gpu.cuh"
__global__ void bp_update_fc(int idx)
{
int i,j,k,l,m,n;
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
__shared__ float _fc2_delta[FC2_SIZE];
__shared__ float _fc1_delta_t[FC1_SIZE];
if(i<FC2_SIZE&&j==0)
{
_fc2_delta[i]=alpha*_C[idx%N_STREAM][i]*(_fc2_a[idx%N_STREAM][i]*(1.0-_fc2_a[idx%N_STREAM][i]));
_fc2_db[idx%N_STREAM][i]+=_fc2_delta[i];
}
__syncthreads();
if(i<FC2_SIZE&&j<FC1_SIZE)
_fc2_dw[idx%N_STREAM][i][j]+=_fc2_delta[i]*_fc1_a[idx%N_STREAM][j];
j=threadIdx.x+blockDim.x*blockIdx.x;
i=threadIdx.y+blockDim.y*blockIdx.y;
__syncthreads();
if(i<FC1_SIZE&&j==0)
{
float error=0;
for(j=0;j<FC2_SIZE;j++)
error+=_fc2_delta[j]*fc2_w[j][i];
_fc1_delta_t[i]=error*(_fc1_a[idx%N_STREAM][i]*(1.0-_fc1_a[idx%N_STREAM][i]));
// _fc1_delta_t[i]=error*(1.0-_fc1_a[idx%N_STREAM][i]*_fc1_a[idx%N_STREAM][i]);
_fc1_db[idx%N_STREAM][i]+=_fc1_delta_t[i];
_fc1_delta[idx%N_STREAM][i]=_fc1_delta_t[i];
}
j=threadIdx.x+blockDim.x*blockIdx.x;
i=threadIdx.y+blockDim.y*blockIdx.y;
__syncthreads();
if(i<FC1_SIZE&&j<CONV_W_NUM)
{
for(k=0;k<POOL_SIZE;k++)
for(l=0;l<POOL_SIZE;l++)
_fc1_dw[idx%N_STREAM][i][j][k][l]+=_fc1_delta_t[i]*_pool[idx%N_STREAM][j][k][l];
__syncthreads();
}
}
__global__ void bp_update_conv(int idx)
{
int i,j,k,l,m,n;
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
k=threadIdx.z+blockDim.z*blockIdx.z;
__shared__ float _conv_sigma_delta[CONV_W_NUM];
__shared__ float _conv_delta[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
if(i<CONV_W_NUM&&j<POOL_SIZE&&k<POOL_SIZE)
{
float error=0;
_conv_delta[i][j][k]=0;
for(l=0;l<FC1_SIZE;l++)
error+=_fc1_delta[idx%N_STREAM][l]*fc1_w[l][i][j][k];
// _conv_delta[i][j][k]=error*(_pool[idx%N_STREAM][i][j][k]*(1.0-_pool[idx%N_STREAM][i][j][k]));
_conv_delta[i][j][k]=error*(1.0-_pool[idx%N_STREAM][i][j][k]*_pool[idx%N_STREAM][i][j][k]);
__syncthreads();
}
if(i<CONV_W_NUM&&j==0&&k==0)
{
_conv_sigma_delta[i]=0;
for(j=0;j<POOL_SIZE;j++)
for(k=0;k<POOL_SIZE;k++)
_conv_sigma_delta[i]+=_conv_delta[i][j][k];
_conv_db[idx%N_STREAM][i]+=_conv_sigma_delta[i];
}
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
k=threadIdx.z+blockDim.z*blockIdx.z;
__syncthreads();
if(i<CONV_W_NUM&&j<CONV_W_SIZE&&k<CONV_W_SIZE)
{
float error=0;
for(m=0;m<POOL_SIZE;m++)
for(n=0;n<POOL_SIZE;n++)
{
int x=_pool_pos[idx%N_STREAM][i][m][n]/2;
int y=_pool_pos[idx%N_STREAM][i][m][n]%2;
error+=_conv_delta[i][m][n]*_input[idx%N_STREAM][2*m+j+x][2*n+k+y];
}
_conv_dw[idx%N_STREAM][i][j][k]+=error;
__syncthreads();
}
}
void bp_update_gpu(int idx)
{
dim3 block1(16,64);
dim3 grid1(1,1);
dim3 block2(6,12,12);
dim3 grid2(1,1,1);
bp_update_fc<<<grid1,block1,0,stream[idx%N_STREAM]>>>(idx);
bp_update_conv<<<grid2,block2,0,stream[idx%N_STREAM]>>>(idx);
}
__global__ void bp_assign_grads_fc(int idx,int minibatch)
{
int i,j,k,l,m,n;
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC2_SIZE&&j==0)
{
for(j=0;j<minibatch;j++)
fc2_b[i]-=(_fc2_db[(idx-j)%N_STREAM][i]/minibatch);
for(j=0;j<minibatch;j++)
_fc2_db[(idx-j)%N_STREAM][i]=0;
}
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC2_SIZE&&j<FC1_SIZE)
{
for(k=0;k<minibatch;k++)
fc2_w[i][j]-=(_fc2_dw[(idx-k)%N_STREAM][i][j]/minibatch);
for(k=0;k<minibatch;k++)
_fc2_dw[(idx-k)%N_STREAM][i][j]=0;
}
j=threadIdx.x+blockDim.x*blockIdx.x;
i=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC1_SIZE&&j==0)
{
for(j=0;j<minibatch;j++)
fc1_b[i]-=(_fc1_db[(idx-j)%N_STREAM][i]/minibatch);
for(j=0;j<minibatch;j++)
_fc1_db[(idx-j)%N_STREAM][i]=0;
}
j=threadIdx.x+blockDim.x*blockIdx.x;
i=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC1_SIZE&&j<CONV_W_NUM)
{
for(k=0;k<POOL_SIZE;k++)
for(l=0;l<POOL_SIZE;l++)
{
for(int m=0;m<minibatch;m++)
fc1_w[i][j][k][l]-=(_fc1_dw[(idx-m)%N_STREAM][i][j][k][l]/minibatch);
for(int m=0;m<minibatch;m++)
_fc1_dw[(idx-m)%N_STREAM][i][j][k][l]=0;
}
}
}
__global__ void bp_assign_grads_conv(int idx,int minibatch)
{
int i,j,k,l,m,n;
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
k=threadIdx.z+blockDim.z*blockIdx.z;
if(i<CONV_W_NUM&&j==0&&k==0)
{
for(j=0;j<minibatch;j++)
conv_b[i]-=(_conv_db[(idx-j)%N_STREAM][i]/minibatch);
for(j=0;j<minibatch;j++)
_conv_db[(idx-j)%N_STREAM][i]=0;
}
i=threadIdx.x+blockDim.x*blockIdx.x;
j=threadIdx.y+blockDim.y*blockIdx.y;
k=threadIdx.z+blockDim.z*blockIdx.z;
if(i<CONV_W_NUM&&j<CONV_W_SIZE&&k<CONV_W_SIZE)
{
for(l=0;l<minibatch;l++)
conv_w[i][j][k]-=(_conv_dw[(idx-l)%N_STREAM][i][j][k]/minibatch);
for(l=0;l<minibatch;l++)
_conv_dw[(idx-l)%N_STREAM][i][j][k]=0;
}
}
void bp_assign_grads_gpu(int idx)
{
dim3 block1(16,64);
dim3 grid1(1,1);
dim3 block2(6,12,12);
dim3 grid2(1,1,1);
bp_assign_grads_fc<<<grid1,block1,0,stream[idx%N_STREAM]>>>(idx,minibatch);
bp_assign_grads_conv<<<grid2,block2,0,stream[idx%N_STREAM]>>>(idx,minibatch);
} |
0ecf0640f0d82e5f55dc32624e41aaf49e79036b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs
*/
#include <math.h>
#include <stdio.h>
#include <chrono>
#include <hip/hip_runtime.h>
#ifndef MIN
#define MIN(a,b) (a < b ? a : b)
#endif
#include "simpleMultiDevice.h"
// Data configuration
#ifndef MAX_GPU_COUNT
#define MAX_GPU_COUNT 8
#endif
const int DATA_N = 1048576 * 32;
// Simple reduction kernel.
// Refer to the 'reduction' CUDA Sample describing
// reduction optimization strategies
__global__
void reduceKernel(float *d_Result, const float *d_Input, int N)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for (int pos = tid; pos < N; pos += threadN)
sum += d_Input[pos];
d_Result[tid] = sum;
}
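// Usage sketch: with BLOCK_N = 32 and THREAD_N = 256 below, the grid-stride
// loop runs threadN = 8192 threads, each accumulating roughly dataN / 8192
// input elements; the host then finishes the reduction by summing the
// ACCUM_N partial results copied back from d_Result.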
// Program main
int main(int argc, char **argv)
{
if (argc != 2)
{
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
int repeat = atoi(argv[1]);
//Solver config
TGPUplan plan[MAX_GPU_COUNT];
//GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, k, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
printf("Starting simpleMultiDevice\n");
hipGetDeviceCount(&GPU_N);
GPU_N = MIN(GPU_N, MAX_GPU_COUNT);
printf("GPU device count: %i\n", GPU_N);
printf("Generating input data of size %d ...\n\n", DATA_N);
//Subdividing input data across GPUs
//Get data sizes for each GPU
for (i = 0; i < GPU_N; i++)
{
plan[i].dataN = DATA_N / GPU_N;
}
//Take into account "odd" data sizes
for (i = 0; i < DATA_N % GPU_N; i++)
{
plan[i].dataN++;
}
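// Worked example of the split above: with DATA_N = 33554432 and GPU_N = 3,
// each plan starts at 33554432 / 3 = 11184810 elements, and the remainder of
// 2 bumps the first two plans to 11184811, so the chunks sum to DATA_N exactly.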
//Assign data ranges to GPUs
for (i = 0; i < GPU_N; i++)
{
plan[i].h_Sum = h_SumGPU + i;
}
//Create streams for issuing GPU commands asynchronously
//allocate memory (GPU and System page-locked)
for (i = 0; i < GPU_N; i++)
{
hipSetDevice(i);
hipStreamCreate(&plan[i].stream);
//Allocate memory
hipMalloc((void **)&plan[i].d_Data, plan[i].dataN * sizeof(float));
hipMalloc((void **)&plan[i].d_Sum, ACCUM_N * sizeof(float));
hipHostMalloc((void **)&plan[i].h_Sum_from_device, ACCUM_N * sizeof(float));
hipHostMalloc((void **)&plan[i].h_Data, plan[i].dataN * sizeof(float));
for (j = 0; j < plan[i].dataN; j++)
{
plan[i].h_Data[j] = (float)rand() / (float)RAND_MAX;
}
}
//Start timing and compute on GPU(s)
printf("Computing with %d GPUs...\n", GPU_N);
auto start = std::chrono::steady_clock::now();
for (k = 0; k < repeat; k++)
{
//Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++)
{
//Set device
hipSetDevice(i);
//Copy input data from CPU
hipMemcpyAsync(plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof(float),
hipMemcpyHostToDevice, plan[i].stream);
//Perform GPU computations
hipLaunchKernelGGL(( reduceKernel), dim3(BLOCK_N), dim3(THREAD_N), 0, plan[i].stream,
plan[i].d_Sum, plan[i].d_Data, plan[i].dataN);
//Read back GPU results
hipMemcpyAsync(plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N *sizeof(float),
hipMemcpyDeviceToHost, plan[i].stream);
}
//Process GPU results
for (i = 0; i < GPU_N; i++)
{
//Set device
hipSetDevice(i);
//Wait for all operations to finish
hipStreamSynchronize(plan[i].stream);
}
}
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf(" Average GPU Processing time: %f (us)\n\n", time * 1e-3f / repeat);
for (i = 0; i < GPU_N; i++)
{
//Finalize GPU reduction for current subvector
float sum = 0;
for (j = 0; j < ACCUM_N; j++)
{
sum += plan[i].h_Sum_from_device[j];
}
*(plan[i].h_Sum) = sum;
//Shut down this GPU
hipHostFree(plan[i].h_Sum_from_device);
hipFree(plan[i].d_Sum);
hipFree(plan[i].d_Data);
hipStreamDestroy(plan[i].stream);
}
sumGPU = 0;
for (i = 0; i < GPU_N; i++)
{
sumGPU += h_SumGPU[i];
}
// Compute on Host CPU
printf("Computing with Host CPU...\n\n");
sumCPU = 0;
for (i = 0; i < GPU_N; i++)
{
for (j = 0; j < plan[i].dataN; j++)
{
sumCPU += plan[i].h_Data[j];
}
}
// Compare GPU and CPU results
printf("Comparing GPU and Host CPU results...\n");
diff = fabs(sumCPU - sumGPU) / fabs(sumCPU);
printf(" GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU);
printf(" Relative difference: %E \n\n", diff);
// Cleanup and shutdown
for (i = 0; i < GPU_N; i++)
{
hipSetDevice(i);
hipHostFree(plan[i].h_Data);
}
exit((diff < 1e-5) ? EXIT_SUCCESS : EXIT_FAILURE);
}
| 0ecf0640f0d82e5f55dc32624e41aaf49e79036b.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs
*/
#include <math.h>
#include <stdio.h>
#include <chrono>
#include <hip/hip_runtime.h>
#ifndef MIN
#define MIN(a,b) (a < b ? a : b)
#endif
#include "simpleMultiDevice.h"
// Data configuration
#ifndef MAX_GPU_COUNT
#define MAX_GPU_COUNT 8
#endif
const int DATA_N = 1048576 * 32;
// Simple reduction kernel.
// Refer to the 'reduction' CUDA Sample describing
// reduction optimization strategies
__global__
void reduceKernel(float *d_Result, const float *d_Input, int N)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for (int pos = tid; pos < N; pos += threadN)
sum += d_Input[pos];
d_Result[tid] = sum;
}
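// Usage sketch: with BLOCK_N = 32 and THREAD_N = 256 below, the grid-stride
// loop runs threadN = 8192 threads, each accumulating roughly dataN / 8192
// input elements; the host then finishes the reduction by summing the
// ACCUM_N partial results copied back from d_Result.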
// Program main
int main(int argc, char **argv)
{
if (argc != 2)
{
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
int repeat = atoi(argv[1]);
//Solver config
TGPUplan plan[MAX_GPU_COUNT];
//GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, k, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
printf("Starting simpleMultiDevice\n");
hipGetDeviceCount(&GPU_N);
GPU_N = MIN(GPU_N, MAX_GPU_COUNT);
printf("GPU device count: %i\n", GPU_N);
printf("Generating input data of size %d ...\n\n", DATA_N);
//Subdividing input data across GPUs
//Get data sizes for each GPU
for (i = 0; i < GPU_N; i++)
{
plan[i].dataN = DATA_N / GPU_N;
}
//Take into account "odd" data sizes
for (i = 0; i < DATA_N % GPU_N; i++)
{
plan[i].dataN++;
}
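// Worked example of the split above: with DATA_N = 33554432 and GPU_N = 3,
// each plan starts at 33554432 / 3 = 11184810 elements, and the remainder of
// 2 bumps the first two plans to 11184811, so the chunks sum to DATA_N exactly.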
//Assign data ranges to GPUs
for (i = 0; i < GPU_N; i++)
{
plan[i].h_Sum = h_SumGPU + i;
}
//Create streams for issuing GPU commands asynchronously
//allocate memory (GPU and System page-locked)
for (i = 0; i < GPU_N; i++)
{
hipSetDevice(i);
hipStreamCreate(&plan[i].stream);
//Allocate memory
hipMalloc((void **)&plan[i].d_Data, plan[i].dataN * sizeof(float));
hipMalloc((void **)&plan[i].d_Sum, ACCUM_N * sizeof(float));
hipHostMalloc((void **)&plan[i].h_Sum_from_device, ACCUM_N * sizeof(float));
hipHostMalloc((void **)&plan[i].h_Data, plan[i].dataN * sizeof(float));
for (j = 0; j < plan[i].dataN; j++)
{
plan[i].h_Data[j] = (float)rand() / (float)RAND_MAX;
}
}
//Start timing and compute on GPU(s)
printf("Computing with %d GPUs...\n", GPU_N);
auto start = std::chrono::steady_clock::now();
for (k = 0; k < repeat; k++)
{
//Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++)
{
//Set device
hipSetDevice(i);
//Copy input data from CPU
hipMemcpyAsync(plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof(float),
hipMemcpyHostToDevice, plan[i].stream);
//Perform GPU computations
reduceKernel<<<BLOCK_N, THREAD_N, 0, plan[i].stream>>>(
plan[i].d_Sum, plan[i].d_Data, plan[i].dataN);
//Read back GPU results
hipMemcpyAsync(plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N *sizeof(float),
hipMemcpyDeviceToHost, plan[i].stream);
}
//Process GPU results
for (i = 0; i < GPU_N; i++)
{
//Set device
hipSetDevice(i);
//Wait for all operations to finish
hipStreamSynchronize(plan[i].stream);
}
}
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf(" Average GPU Processing time: %f (us)\n\n", time * 1e-3f / repeat);
for (i = 0; i < GPU_N; i++)
{
//Finalize GPU reduction for current subvector
float sum = 0;
for (j = 0; j < ACCUM_N; j++)
{
sum += plan[i].h_Sum_from_device[j];
}
*(plan[i].h_Sum) = sum;
//Shut down this GPU
hipHostFree(plan[i].h_Sum_from_device);
hipFree(plan[i].d_Sum);
hipFree(plan[i].d_Data);
hipStreamDestroy(plan[i].stream);
}
sumGPU = 0;
for (i = 0; i < GPU_N; i++)
{
sumGPU += h_SumGPU[i];
}
// Compute on Host CPU
printf("Computing with Host CPU...\n\n");
sumCPU = 0;
for (i = 0; i < GPU_N; i++)
{
for (j = 0; j < plan[i].dataN; j++)
{
sumCPU += plan[i].h_Data[j];
}
}
// Compare GPU and CPU results
printf("Comparing GPU and Host CPU results...\n");
diff = fabs(sumCPU - sumGPU) / fabs(sumCPU);
printf(" GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU);
printf(" Relative difference: %E \n\n", diff);
// Cleanup and shutdown
for (i = 0; i < GPU_N; i++)
{
hipSetDevice(i);
hipHostFree(plan[i].h_Data);
}
exit((diff < 1e-5) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
4a7aab9ed3da25b504bec8777d93da165e4c6c59.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include <algorithm>
#include "SyncedMemory.h"
#include "pgm.h"
#include "lab3.h"
using namespace std;
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
if (argc != 7) {
printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output>\n", argv[0]);
abort();
}
bool sucb, suct, sucm;
int wb, hb, cb, wt, ht, ct, wm, hm, cm;
auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]);
auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]);
auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]);
if (!(sucb && suct && sucm)) {
puts("Something wrong with reading the input image files.");
abort();
}
if (wt != wm || ht != hm) {
puts("The mask and target image must have the same size.");
abort();
}
if (cm != 1) {
puts("The mask image must be mono-colored.");
abort();
}
if (cb != 3 || ct != 3) {
puts("The background and target image must be colored.");
abort();
}
const int oy = atoi(argv[4]), ox = atoi(argv[5]);
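	// Note: the usage string documents argv[4] as <offset x> and argv[5] as <offset y>,
	// but argv[4] is stored in oy and argv[5] in ox here; (oy, ox) is then passed to
	// PoissonImageCloning in row-major (y, x) order, so verify the intended axis convention.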
const int SIZEB = wb*hb*3;
const int SIZET = wt*ht*3;
const int SIZEM = wm*hm;
MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB);
auto background_s = background.CreateSync(SIZEB);
auto target_s = target.CreateSync(SIZET);
auto mask_s = mask.CreateSync(SIZEM);
auto output_s = output.CreateSync(SIZEB);
float *background_cpu = background_s.get_cpu_wo();
float *target_cpu = target_s.get_cpu_wo();
float *mask_cpu = mask_s.get_cpu_wo();
copy(imgb.get(), imgb.get()+SIZEB, background_cpu);
copy(imgt.get(), imgt.get()+SIZET, target_cpu);
copy(imgm.get(), imgm.get()+SIZEM, mask_cpu);
PoissonImageCloning(
background_s.get_gpu_ro(),
target_s.get_gpu_ro(),
mask_s.get_gpu_ro(),
output_s.get_gpu_wo(),
wb, hb, wt, ht, oy, ox
);
unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]);
const float *o_cpu = output_s.get_cpu_ro();
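	// Round each float channel to the nearest integer and clamp it to the [0, 255] uint8 range.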
transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); });
WritePPM(o.get(), wb, hb, argv[6]);
return 0;
}
| 4a7aab9ed3da25b504bec8777d93da165e4c6c59.cu | #include <cstdio>
#include <cstdint>
#include <cstdlib>
#include <algorithm>
#include "SyncedMemory.h"
#include "pgm.h"
#include "lab3.h"
using namespace std;
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
if (argc != 7) {
printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output>\n", argv[0]);
abort();
}
bool sucb, suct, sucm;
int wb, hb, cb, wt, ht, ct, wm, hm, cm;
auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]);
auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]);
auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]);
if (!(sucb && suct && sucm)) {
puts("Something wrong with reading the input image files.");
abort();
}
if (wt != wm || ht != hm) {
puts("The mask and target image must have the same size.");
abort();
}
if (cm != 1) {
puts("The mask image must be mono-colored.");
abort();
}
if (cb != 3 || ct != 3) {
puts("The background and target image must be colored.");
abort();
}
const int oy = atoi(argv[4]), ox = atoi(argv[5]);
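	// Note: the usage string documents argv[4] as <offset x> and argv[5] as <offset y>,
	// but argv[4] is stored in oy and argv[5] in ox here; (oy, ox) is then passed to
	// PoissonImageCloning in row-major (y, x) order, so verify the intended axis convention.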
const int SIZEB = wb*hb*3;
const int SIZET = wt*ht*3;
const int SIZEM = wm*hm;
MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB);
auto background_s = background.CreateSync(SIZEB);
auto target_s = target.CreateSync(SIZET);
auto mask_s = mask.CreateSync(SIZEM);
auto output_s = output.CreateSync(SIZEB);
float *background_cpu = background_s.get_cpu_wo();
float *target_cpu = target_s.get_cpu_wo();
float *mask_cpu = mask_s.get_cpu_wo();
copy(imgb.get(), imgb.get()+SIZEB, background_cpu);
copy(imgt.get(), imgt.get()+SIZET, target_cpu);
copy(imgm.get(), imgm.get()+SIZEM, mask_cpu);
PoissonImageCloning(
background_s.get_gpu_ro(),
target_s.get_gpu_ro(),
mask_s.get_gpu_ro(),
output_s.get_gpu_wo(),
wb, hb, wt, ht, oy, ox
);
unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]);
const float *o_cpu = output_s.get_cpu_ro();
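	// Round each float channel to the nearest integer and clamp it to the [0, 255] uint8 range.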
transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); });
WritePPM(o.get(), wb, hb, argv[6]);
return 0;
}
|
9adaed5e0200bd65db845073f13d70afa3bd6673.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020, Tobias Rapp
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Karlsruhe Institute of Technology nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "particle_grid.cuh"
#include "cut/cuda_math.h"
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
using namespace cut;
/**
* For all grid indices in cellPtr, this assigns every grid cell an index to the first particle in the cell.
*/
__global__ void setGridCellsKernel(GridCell *cells, const CellIndex *cellPtr, uint32_t numParticles)
{
auto gId = blockDim.x * blockIdx.x + threadIdx.x;
if (gId >= numParticles)
return;
if (gId == 0 || cellPtr[gId] != cellPtr[gId - 1])
cells[cellPtr[gId]] = gId;
}
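// Worked example (illustrative values): for particles sorted by cell so that
// cellPtr = [2, 2, 2, 5, 5, 7], the comparison above fires at gId 0, 3 and 5,
// giving cells[2] = 0, cells[5] = 3 and cells[7] = 5 -- each occupied cell
// records the index of its first particle, and empty cells are left untouched.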
void setGridCellsGPU(GridCell *cells, const CellIndex *cellPtr, uint32_t num_particles)
{
dim3 threadsPerBlock = 128;
dim3 numPartBlocks = (num_particles + threadsPerBlock.x - 1) / threadsPerBlock.x;
hipLaunchKernelGGL(( setGridCellsKernel), dim3(numPartBlocks), dim3(threadsPerBlock), 0, 0, cells, cellPtr, num_particles);
}
unique_ptr<UniformGridGPU> createGridFromDomainGPU(const float *x, const float *y, const float *z, uint32_t num_particles, float r)
{
thrust::device_ptr<const float> ptrX(x);
thrust::device_ptr<const float> ptrY(y);
thrust::device_ptr<const float> ptrZ(z);
auto itX = thrust::minmax_element(ptrX, ptrX + num_particles);
auto itY = thrust::minmax_element(ptrY, ptrY + num_particles);
auto itZ = thrust::minmax_element(ptrZ, ptrZ + num_particles);
Vec3f min(*itX.first, *itY.first, *itZ.first);
Vec3f max(*itX.second, *itY.second, *itZ.second);
return createGridFromDomain<true>(r, min, max);
}
| 9adaed5e0200bd65db845073f13d70afa3bd6673.cu | // Copyright (c) 2020, Tobias Rapp
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Karlsruhe Institute of Technology nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "particle_grid.cuh"
#include "cut/cuda_math.h"
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
using namespace cut;
/**
* For all grid indices in cellPtr, this assigns every grid cell an index to the first particle in the cell.
*/
__global__ void setGridCellsKernel(GridCell *cells, const CellIndex *cellPtr, uint32_t numParticles)
{
auto gId = blockDim.x * blockIdx.x + threadIdx.x;
if (gId >= numParticles)
return;
if (gId == 0 || cellPtr[gId] != cellPtr[gId - 1])
cells[cellPtr[gId]] = gId;
}
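// Worked example (illustrative values): for particles sorted by cell so that
// cellPtr = [2, 2, 2, 5, 5, 7], the comparison above fires at gId 0, 3 and 5,
// giving cells[2] = 0, cells[5] = 3 and cells[7] = 5 -- each occupied cell
// records the index of its first particle, and empty cells are left untouched.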
void setGridCellsGPU(GridCell *cells, const CellIndex *cellPtr, uint32_t num_particles)
{
dim3 threadsPerBlock = 128;
dim3 numPartBlocks = (num_particles + threadsPerBlock.x - 1) / threadsPerBlock.x;
setGridCellsKernel<<<numPartBlocks, threadsPerBlock>>>(cells, cellPtr, num_particles);
}
unique_ptr<UniformGridGPU> createGridFromDomainGPU(const float *x, const float *y, const float *z, uint32_t num_particles, float r)
{
thrust::device_ptr<const float> ptrX(x);
thrust::device_ptr<const float> ptrY(y);
thrust::device_ptr<const float> ptrZ(z);
auto itX = thrust::minmax_element(ptrX, ptrX + num_particles);
auto itY = thrust::minmax_element(ptrY, ptrY + num_particles);
auto itZ = thrust::minmax_element(ptrZ, ptrZ + num_particles);
Vec3f min(*itX.first, *itY.first, *itZ.first);
Vec3f max(*itX.second, *itY.second, *itZ.second);
return createGridFromDomain<true>(r, min, max);
}
|
d5d1282edddca2cce3d9783ed62865c048cdcd4f.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <cfloat>
#include <algorithm>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <rocblas.h>
#include <cudnn.h>
/*** Definitions ***/
// Block width for CUDA kernels
#define BW 128
#define RANDOM_SEED -1
#ifdef USE_GFLAGS
#include <gflags/gflags.h>
#ifndef _WIN32
#define gflags google
#endif
#else
// Constant versions of gflags
#define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value)
#define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value)
#define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value)
#define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value)
#define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value))
#endif
#define FatalError(s) do { \
std::stringstream _where, _message; \
_where << __FILE__ << ':' << __LINE__; \
_message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \
std::cerr << _message.str() << "\nAborting...\n"; \
hipDeviceReset(); \
exit(1); \
} while(0)
#define checkCUDNN(status) do { \
std::stringstream _error; \
if (status != CUDNN_STATUS_SUCCESS) { \
_error << "CUDNN failure: " << cudnnGetErrorString(status); \
FatalError(_error.str()); \
} \
} while(0)
#define checkCudaErrors(status) do { \
std::stringstream _error; \
if (status != 0) { \
_error << "Cuda failure: " << status; \
FatalError(_error.str()); \
} \
} while(0)
/********************************************************************************************************************/
#include "../include/convolution.h"
#include "../include/max_pool.h"
#include "../include/relu.h"
#include "../include/softmax.h"
#include "../include/sigmoid.h"
/* Utility Functions */
void pprint(float* matrix, int size, int width){
for(int i=0; i<size; i++){
if(i%width==0) std::cout << std::endl;
std::cout << matrix[i] << " ";
}
std::cout << std::endl;
}
/*********************/
void test_convolution()
{
printf("*********** CONV_TEST **************\n");
int WIDTH = 4, HEIGHT = 5, BATCH_SIZE = 1, CHANNELS = 1;
int GPU_ID = 0;
checkCudaErrors(hipSetDevice(GPU_ID));
float *data, *output;
cudnnHandle_t cudnn;
hipblasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
hipblasCreate(&cublas);
Conv c(1, CHANNELS, 3, 1, 1, cudnn, cublas,
BATCH_SIZE, WIDTH, HEIGHT, true, true, GPU_ID, d1, d2, true);
hipMalloc(&data, sizeof(float) * c.input_size);
hipMalloc(&output, sizeof(float) * c.output_size);
float *cpu_data = (float *)malloc(sizeof(float) * c.input_size);
const float data_[5][4] = {{1, 6, 11, 16},
{2, 7, 12, 17},
{3, 8, 13, 18},
{4, 9, 14, 19},
{5, 10, 15, 20}};
const float data_grad[5][4] = {{0, 0, 0, 0},
{0, 10, 10, 0},
{0, 0, 0, 0},
{0, 10, 10, 0},
{0, 0, 0, 0}};
for(int i = 0;i < HEIGHT;i++)
for(int j = 0;j < WIDTH;j++)
cpu_data[i*WIDTH + j] = data_[i][j];
// for(int i = 0;i < HEIGHT;i++) {
// for(int j = 0;j < WIDTH;j++)
// std::cout << cpu_data[i*WIDTH + j] << " ";
// std::cout << std::endl;
// }
checkCudaErrors(hipMemcpyAsync(data, cpu_data, sizeof(float) * c.input_size, hipMemcpyHostToDevice));
c.forward(data, output);
// Move from device to host
float *out = (float *)malloc(sizeof(float) * c.output_size);
checkCudaErrors(hipMemcpy(out, output, sizeof(float) * c.output_size, hipMemcpyDeviceToHost));
printf("Output of conv:\n");
pprint(out, c.output_size, WIDTH);
float *grad_above = (float *)malloc(sizeof(float) * c.output_size);
for(int i = 0;i < HEIGHT;i++)
for(int j = 0;j < WIDTH;j++)
grad_above[i*WIDTH + j] = data_grad[i][j];
printf("Grad abovve\n");
pprint(grad_above, c.output_size, WIDTH);
float *d_grad_above;
hipMalloc(&d_grad_above, sizeof(float) * c.output_size);
checkCudaErrors(hipMemcpyAsync(d_grad_above, grad_above, sizeof(float) * c.output_size, hipMemcpyHostToDevice));
c.backward(d_grad_above, c.input_descriptor, data);
int t = c.in_channels * c.kernel_size * c.kernel_size * c.out_channels;
float *grad_kernel = (float *)malloc(sizeof(float) * t);
checkCudaErrors(hipMemcpy(grad_kernel, c.grad_kernel, sizeof(float) * t, hipMemcpyDeviceToHost));
std::cout<<"Printing grad_kernels . . .\n";
for(int i = 0;i < t;i++)
std::cout << grad_kernel[i] << " ";
std::cout << std::endl;
t = c.out_channels;
float *grad_bias = (float *)malloc(sizeof(float) * t);
checkCudaErrors(hipMemcpy(grad_bias, c.grad_bias, sizeof(float) * t, hipMemcpyDeviceToHost));
std::cout<<"Printing grad_bias . . .\n";
for(int i = 0;i < t;i++)
std::cout << grad_bias[i] << " ";
std::cout << std::endl;
t = BATCH_SIZE * HEIGHT * WIDTH * CHANNELS;
float *grad_data = (float *)malloc(sizeof(float) * t);
checkCudaErrors(hipMemcpy(grad_data, c.grad_data, sizeof(float) * t, hipMemcpyDeviceToHost));
std::cout<<"Printing grad_data . . .\n";
pprint(grad_data, c.input_size, WIDTH);
/* for(int i = 0;i < t;i++)
std::cout << grad_data[i] << " ";
std::cout << std::endl;*/
printf("\n");
}
void test_mpl()
{
printf("*********** CONV_POOL TEST **************\n");
// Initialize image and cudnn handles
// std::cout << "-------- TESTING MAX POOL LAYER --------\n";
int WIDTH_CONV = 4, HEIGHT_CONV = 5, KERNEL_SIZE_CONV=3, PADDING_CONV=1, STRIDE_CONV=1; //Input to Conv
int SIZE_MAX_POOL=2, STRIDE_MAX_POOL=2, PADDING_MAX_POOL=0, HEIGHT_MAX_POOL=(HEIGHT_CONV - KERNEL_SIZE_CONV + 2*PADDING_CONV)/STRIDE_CONV + 1, WIDTH_MAX_POOL=(WIDTH_CONV - KERNEL_SIZE_CONV + 2*PADDING_CONV)/STRIDE_CONV + 1; //For MaxPool
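//With these values the standard output-size formula (N - K + 2P)/S + 1 gives
//(5 - 3 + 2*1)/1 + 1 = 5 rows and (4 - 3 + 2*1)/1 + 1 = 4 cols for the conv,
//so HEIGHT_MAX_POOL = 5 and WIDTH_MAX_POOL = 4 match the conv output fed into the pool.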
int BATCH_SIZE = 1, CHANNELS = 1; //Image
int GPU_ID = 0;
//int output_height = (HEIGHT_MAX_POOL - SIZE_MAX_POOL) / 2 + 1;
//int output_width = (WIDTH_MAX_POOL - 2) / 2 + 1;
checkCudaErrors(hipSetDevice(GPU_ID));
float *data, *output_conv, *output_max_pool, *input_diff_grad, *output_diff_grad;
cudnnHandle_t cudnn;
hipblasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
hipblasCreate(&cublas);
// Stack Layers
Conv c(1, CHANNELS, KERNEL_SIZE_CONV, PADDING_CONV, STRIDE_CONV, cudnn, cublas, BATCH_SIZE, WIDTH_CONV, HEIGHT_CONV, true, true, GPU_ID, d1, d2, true);
MaxPoolLayer mpl(SIZE_MAX_POOL, STRIDE_MAX_POOL, PADDING_MAX_POOL, BATCH_SIZE, CHANNELS, HEIGHT_MAX_POOL, WIDTH_MAX_POOL, GPU_ID, cudnn, c.output_descriptor, d2, true);
//Initialize tensors device
hipMalloc(&data, sizeof(float) * c.input_size); //CONV INPUT
hipMalloc(&output_conv, sizeof(float) * c.output_size); //CONV OUTPUT
hipMalloc(&output_max_pool, sizeof(float) * mpl.output_size);
//Initialize host arrays
float *cpu_data = (float *)malloc(sizeof(float) * c.input_size);
const float data_[5][4] = {{1, 6, 11, 16},
{2, 7, 12, 17},
{3, 8, 13, 18},
{4, 9, 14, 19},
{5, 10, 15, 20}};
for(int i=0; i<5; i++)
for(int j=0; j<4; j++)
cpu_data[4*i + j] = data_[i][j];
//for(int i = 0;i < c.input_size;i++) cpu_data[i] = 3.0;
checkCudaErrors(hipMemcpyAsync(data, cpu_data, sizeof(float) * c.input_size, hipMemcpyHostToDevice));
float* output_matrix = (float *)malloc(sizeof(float)*mpl.output_size);
float* output_matrix_conv = (float *)malloc(sizeof(float)*mpl.input_size);
float* grad_data_conv = (float *)malloc(sizeof(float) * c.input_size);
std::cout << "Input Matrix:";
pprint(cpu_data, c.input_size, WIDTH_CONV);
std::cout << "\nApply Convolution kernel_size=3, padding=1, stride=1:\n";
c.forward(data, output_conv);
checkCudaErrors(hipMemcpy(output_matrix_conv, output_conv, sizeof(float)*mpl.input_size, hipMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Convolution:";
pprint(output_matrix_conv, mpl.input_size, mpl.input_width);
std::cout << "\nPerforming max pool size=(2,2), stride=(2, 2), padding=(0, 0)\n";
mpl.forward(output_conv, output_max_pool);
checkCudaErrors(hipMemcpy(output_matrix, output_max_pool, sizeof(float)*mpl.output_size, hipMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Max Pool:";
pprint(output_matrix, mpl.output_size, mpl.out_width);
//COMMENT BACKWARD
//Generate an input differential gradient received by the max pool layer in backprop
hipMalloc(&input_diff_grad, sizeof(float) * mpl.output_size);
hipMalloc(&output_diff_grad, sizeof(float) * mpl.input_size);
float *input_diff_grad_cpu = (float *)malloc(sizeof(float) * mpl.output_size);
for(int i = 0;i < mpl.output_size;i++) input_diff_grad_cpu[i] = 10.0;
checkCudaErrors(hipMemcpyAsync(input_diff_grad, input_diff_grad_cpu, sizeof(float) * mpl.output_size, hipMemcpyHostToDevice));
float* output_gradient = (float *)malloc(sizeof(float)*mpl.input_size);
mpl.backward(output_conv, input_diff_grad, output_max_pool, output_diff_grad/*, c.output_descriptor*/);
checkCudaErrors(hipMemcpy(output_gradient, output_diff_grad, sizeof(float)*mpl.input_size, hipMemcpyDeviceToHost));
std::cout << "\nGradient from Max Pool Layer:";
pprint(output_gradient, mpl.input_size, mpl.input_width);
// printf("\n\n\nDone\n\n\n");
// checkCudaErrors(hipSetDevice(GPU_ID));
std::cout << "\nBackpropping that through conv:\n";
c.backward(output_diff_grad, c.input_descriptor, data);
checkCudaErrors(hipMemcpy(grad_data_conv, c.grad_data, sizeof(float)*c.input_size, hipMemcpyDeviceToHost));
printf("\nGrad data conv");
pprint(grad_data_conv, c.input_size, WIDTH_CONV);
float *grad_kernel = (float *)malloc(sizeof(float) * 9);
checkCudaErrors(hipMemcpy(grad_kernel, c.grad_kernel, sizeof(float) * 9, hipMemcpyDeviceToHost));
std::cout<<"Printing grad_kernels . . .\n";
for(int i = 0;i < 9;i++)
std::cout << grad_kernel[i] << " ";
std::cout << std::endl;
int t = c.out_channels;
float *grad_bias = (float *)malloc(sizeof(float) * t);
checkCudaErrors(hipMemcpy(grad_bias, c.grad_bias, sizeof(float) * t, hipMemcpyDeviceToHost));
std::cout<<"Printing grad_bias . . .\n";
for(int i = 0;i < t;i++)
std::cout << grad_bias[i] << " ";
std::cout << std::endl;
return;
}
void test_relu()
{
printf("*********** RELU TEST **************\n");
int WIDTH = 5, HEIGHT = 5, BATCH_SIZE = 1, CHANNELS = 1;
int GPU_ID = 0;
checkCudaErrors(hipSetDevice(GPU_ID));
float *data, *output, *dup, *dout;
cudnnHandle_t cudnn;
hipblasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
hipblasCreate(&cublas);
Relu R(CHANNELS, CHANNELS, cudnn, cublas, BATCH_SIZE, HEIGHT, WIDTH, GPU_ID, d1, d2, true);
hipMalloc((void **)&data, sizeof(float) * R.input_size);
hipMalloc((void **)&output, sizeof(float) * R.output_size);
hipMalloc((void **)&dout, sizeof(float) * R.output_size);
hipMalloc((void **)&dup, sizeof(float) * R.output_size);
float *cpu_data = (float *)malloc(sizeof(float) * R.input_size);
for(int i = 0; i < R.input_size; i++)
cpu_data[i] = -12.0 + i;
cpu_data[1] = 3234.0; //to check clipping
cpu_data[20] = 3566.0;
std::cout<<"Testing Forward . . ."<<std::endl;
std::cout << "Input Matrix:"<<std::endl;
for(int i=0; i<R.input_size; i++)
{
if(i%WIDTH==0)
std::cout << "\n";
std::cout << cpu_data[i] << " ";
}
std::cout << "\nApply ReLU:"<<std::endl;
checkCudaErrors(hipMemcpy(data, cpu_data, sizeof(float) * R.input_size, hipMemcpyHostToDevice));
// std::cout << "\nApply ReLU 2:"<<std::endl;
R.forward(data, output);
float *out = (float *)malloc(sizeof(float) * R.output_size);
checkCudaErrors(hipMemcpy(out, output, sizeof(float) * R.output_size, hipMemcpyDeviceToHost));
std::cout << "Output Matrix:"<<std::endl;
for(int i=0; i<R.output_size; i++)
{
if(i%WIDTH==0)
std::cout << "\n";
std::cout << out[i] << " ";
}
std::cout<<std::endl;
std::cout<<"Testing Backward . . ."<<std::endl;
float *cpu_dup = (float *)malloc(sizeof(float) * R.output_size);
for(int i=0; i<R.output_size; i++)
cpu_dup[i] = 100 + i;
std::cout << "Upstream Derivatives:";
for(int i=0; i<R.output_size; i++)
{
if(i%WIDTH==0)
std::cout << "\n";
std::cout << cpu_dup[i] << " ";
}
std::cout<<std::endl;
checkCudaErrors(hipMemcpy(dup, cpu_dup, sizeof(float) * R.output_size, hipMemcpyHostToDevice));
std::cout << "\nApply Backward:"<<std::endl;
R.backward(dup, dout);
float *cpu_dout = (float *)malloc(sizeof(float) * R.input_size);
checkCudaErrors(hipMemcpy(cpu_dout, dout, sizeof(float) * R.input_size, hipMemcpyDeviceToHost));
std::cout << "Back prop results :"<<std::endl;
for(int i=0; i<R.input_size; i++)
{
if(i%WIDTH==0)
std::cout << "\n";
std::cout << cpu_dout[i] << " ";
}
std::cout<<std::endl;
}
void test_conv_relu_maxpool()
{
printf("*********** CONV_RELU_POOL TEST **************\n");
// Initialize image and cudnn handles
int WIDTH_CONV = 4, HEIGHT_CONV = 5, KERNEL_SIZE_CONV=3, PADDING_CONV=1, STRIDE_CONV=1; //Input to Conv
int SIZE_MAX_POOL=2, STRIDE_MAX_POOL=2, PADDING_MAX_POOL=0; //For MaxPool
int BATCH_SIZE = 1, CHANNELS = 1; //Image
int GPU_ID = 0;
checkCudaErrors(hipSetDevice(GPU_ID));
float *data, *output_conv, *output_relu, *output_max_pool, *input_diff_grad, *output_diff_grad, *output_diff_grad_relu;
cudnnHandle_t cudnn;
hipblasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
hipblasCreate(&cublas);
// Stack Layers
Conv c(1, CHANNELS, KERNEL_SIZE_CONV, PADDING_CONV, STRIDE_CONV, cudnn, cublas, BATCH_SIZE, WIDTH_CONV, HEIGHT_CONV, true, true, GPU_ID, d1, d2, true);
Relu R(CHANNELS, CHANNELS, cudnn, cublas, BATCH_SIZE, c.out_height, c.out_width, GPU_ID, c.output_descriptor, d2, false);
MaxPoolLayer mpl(SIZE_MAX_POOL, STRIDE_MAX_POOL, PADDING_MAX_POOL, BATCH_SIZE, CHANNELS, R.out_height, R.out_width, GPU_ID, cudnn, R.output_descriptor, d2, false);
//Initialize tensors device
hipMalloc(&data, sizeof(float) * c.input_size); //CONV INPUT
hipMalloc(&output_conv, sizeof(float) * c.output_size); //CONV OUTPUT-RELU INPUT
hipMalloc(&output_relu, sizeof(float) * R.output_size); //RELU OUTPUT-MAXPOOL INPUT
hipMalloc(&output_max_pool, sizeof(float) * mpl.output_size); //MAXPOOL OUTPUT
//Initialize host arrays
float *cpu_data = (float *)malloc(sizeof(float) * c.input_size);
const float data_[5][4] = {{1, 6, 11, 16},
{2, 7, 12, 17},
{3, 8, 13, 18},
{4, 9, 14, 19},
{5, 10, 15, 20}};
for(int i=0; i<5; i++)
for(int j=0; j<4; j++)
cpu_data[4*i + j] = data_[i][j];
//for(int i = 0;i < c.input_size;i++) cpu_data[i] = 3.0;
checkCudaErrors(hipMemcpyAsync(data, cpu_data, sizeof(float) * c.input_size, hipMemcpyHostToDevice));
float* output_matrix_conv = (float *)malloc(sizeof(float)*c.output_size);
float* output_matrix_relu = (float *)malloc(sizeof(float)*R.output_size);
float* output_matrix = (float *)malloc(sizeof(float)*mpl.output_size);
float* grad_data_conv = (float *)malloc(sizeof(float) * c.input_size);
std::cout << "Input Matrix:";
pprint(cpu_data, c.input_size, WIDTH_CONV);
std::cout << "\nApply Convolution kernel_size=3, padding=1, stride=1:\n";
c.forward(data, output_conv);
checkCudaErrors(hipMemcpy(output_matrix_conv, output_conv, sizeof(float)*mpl.input_size, hipMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Convolution:";
pprint(output_matrix_conv, c.output_size, mpl.input_width);
std::cout << "\nApply Relu:\n";
R.forward(output_conv, output_relu);
checkCudaErrors(hipMemcpy(output_matrix_relu, output_relu, sizeof(float)*R.output_size, hipMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Relu:";
pprint(output_matrix_relu, R.output_size, R.out_width);
std::cout << "\nPerforming max pool size=(2,2), stride=(2, 2), padding=(0, 0)\n";
mpl.forward(output_relu, output_max_pool);
checkCudaErrors(hipMemcpy(output_matrix, output_max_pool, sizeof(float)*mpl.output_size, hipMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Max Pool:";
pprint(output_matrix, mpl.output_size, mpl.out_width);
// 'COMMENT BACKWARD'
//Generate an input differential gradient received by the max pool layer in backprop
hipMalloc(&input_diff_grad, sizeof(float) * mpl.output_size);
hipMalloc(&output_diff_grad, sizeof(float) * mpl.input_size);
hipMalloc(&output_diff_grad_relu, sizeof(float) * R.input_size);
float *input_diff_grad_cpu = (float *)malloc(sizeof(float) * mpl.output_size);
for(int i = 0;i < mpl.output_size;i++) input_diff_grad_cpu[i] = 10.0;
checkCudaErrors(hipMemcpyAsync(input_diff_grad, input_diff_grad_cpu, sizeof(float) * mpl.output_size, hipMemcpyHostToDevice));
float* output_gradient = (float *)malloc(sizeof(float)*mpl.input_size);
mpl.backward(output_conv, input_diff_grad, output_max_pool, output_diff_grad);
checkCudaErrors(hipMemcpy(output_gradient, output_diff_grad, sizeof(float)*mpl.input_size, hipMemcpyDeviceToHost));
std::cout << "\nGradient from Max Pool Layer:";
pprint(output_gradient, mpl.input_size, mpl.input_width);
std::cout << "\nBackpropping that through relu:";
R.backward(output_diff_grad, output_diff_grad_relu);
output_gradient = (float *)malloc(sizeof(float)*R.input_size);
checkCudaErrors(hipMemcpy(output_gradient, output_diff_grad_relu, sizeof(float)*R.input_size, hipMemcpyDeviceToHost));
std::cout << "\nGradient from Relu Layer:";
pprint(output_gradient, R.input_size, R.input_width);
std::cout << "\nBackpropping that through conv:\n";
c.backward(output_diff_grad_relu, c.input_descriptor, data);
checkCudaErrors(hipMemcpy(grad_data_conv, c.grad_data, sizeof(float)*c.input_size, hipMemcpyDeviceToHost));
printf("\nGrad data conv: ");
pprint(grad_data_conv, c.input_size, WIDTH_CONV);
float *grad_kernel = (float *)malloc(sizeof(float) * 9);
checkCudaErrors(hipMemcpy(grad_kernel, c.grad_kernel, sizeof(float) * 9, hipMemcpyDeviceToHost));
std::cout<<"Printing grad_kernels . . .\n";
for(int i = 0;i < 9;i++)
std::cout << grad_kernel[i] << " ";
std::cout << std::endl;
int t = c.out_channels;
float *grad_bias = (float *)malloc(sizeof(float) * t);
checkCudaErrors(hipMemcpy(grad_bias, c.grad_bias, sizeof(float) * t, hipMemcpyDeviceToHost));
std::cout<<"Printing grad_bias . . .\n";
for(int i = 0;i < t;i++)
std::cout << grad_bias[i] << " ";
std::cout << std::endl;
}
void test_sigmoid()
{
int WIDTH = 5, HEIGHT = 5, BATCH_SIZE = 1, CHANNELS = 1;
int GPU_ID = 0;
checkCudaErrors(hipSetDevice(GPU_ID));
float *data, *output, *dup, *dout;
cudnnHandle_t cudnn;
hipblasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
hipblasCreate(&cublas);
Sigmoid R(CHANNELS, CHANNELS, cudnn, cublas, BATCH_SIZE, HEIGHT, WIDTH, GPU_ID, d1, d2, true);
hipMalloc((void **)&data, sizeof(float) * R.input_size);
hipMalloc((void **)&output, sizeof(float) * R.output_size);
hipMalloc((void **)&dout, sizeof(float) * R.output_size);
hipMalloc((void **)&dup, sizeof(float) * R.output_size);
float *cpu_data = (float *)malloc(sizeof(float) * R.input_size);
for(int i = 0; i < R.input_size; i++)
cpu_data[i] = -12.0 + i;
cpu_data[1] = 3234.0; //to check clipping
cpu_data[20] = 3566.0;
// std::cout<<"Testing Forward . . ."<<std::endl;
// std::cout << "Input Matrix:"<<std::endl;
// for(int i=0; i<R.input_size; i++)
// {
// if(i%WIDTH==0)
// std::cout << "\n";
// std::cout << cpu_data[i] << " ";
// }
// std::cout << "\nApply ReLU:"<<std::endl;
checkCudaErrors(hipMemcpy(data, cpu_data, sizeof(float) * R.input_size, hipMemcpyHostToDevice));
std::cout << "\nApply Sigmoid\n\n\n:"<<std::endl;
R.forward(data, output);
float *out = (float *)malloc(sizeof(float) * R.output_size);
checkCudaErrors(hipMemcpy(out, output, sizeof(float) * R.output_size, hipMemcpyDeviceToHost));
// std::cout << "Output Matrix:"<<std::endl;
// for(int i=0; i<R.output_size; i++)
// {
// if(i%WIDTH==0)
// std::cout << "\n";
// std::cout << out[i] << " ";
// }
// std::cout<<std::endl;
// std::cout<<"Testing Backward . . ."<<std::endl;
float *cpu_dup = (float *)malloc(sizeof(float) * R.output_size);
for(int i=0; i<R.output_size; i++)
cpu_dup[i] = 100 + i;
// std::cout << "Upstream Derivatives:";
// for(int i=0; i<R.output_size; i++)
// {
// if(i%WIDTH==0)
// std::cout << "\n";
// std::cout << cpu_dup[i] << " ";
// }
// std::cout<<std::endl;
checkCudaErrors(hipMemcpy(dup, cpu_dup, sizeof(float) * R.output_size, hipMemcpyHostToDevice));
// std::cout << "\nApply Backward:"<<std::endl;
R.backward(dup, dout);
float *cpu_dout = (float *)malloc(sizeof(float) * R.input_size);
checkCudaErrors(hipMemcpy(cpu_dout, dout, sizeof(float) * R.input_size, hipMemcpyDeviceToHost));
std::cout << "Done\n\n\n";
// std::cout << "Back prop results :"<<std::endl;
// for(int i=0; i<R.input_size; i++)
// {
// if(i%WIDTH==0)
// std::cout << "\n";
// std::cout << cpu_dout[i] << " ";
// }
// std::cout<<std::endl;
}
void test() {
// Tests both backward and forward
test_convolution();
printf("\n\n\n----------Convolution Test Passed!-----------\n\n\n");
test_mpl();
printf("\n\n\n----------Max-Pooling Test Passed!-----------\n\n\n");
test_relu();
printf("\n\n\n----------Relu Test Passed!-----------\n\n\n");
test_conv_relu_maxpool();
printf("\n\n\n----------Conv Relu Maxpool Test Passed!-----------\n\n\n");
// test_sigmoid();
// printf("\n\n\n----------Sigmoid Test Passed!-----------\n\n\n");
}
int main() {
test();
printf("Out\n\n\n");
return 0;
}
| d5d1282edddca2cce3d9783ed62865c048cdcd4f.cu | #include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <cfloat>
#include <algorithm>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas_v2.h>
#include <cudnn.h>
/*** Definitions ***/
// Block width for CUDA kernels
#define BW 128
#define RANDOM_SEED -1
#ifdef USE_GFLAGS
#include <gflags/gflags.h>
#ifndef _WIN32
#define gflags google
#endif
#else
// Constant versions of gflags
#define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value)
#define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value)
#define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value)
#define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value)
#define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value))
#endif
#define FatalError(s) do { \
std::stringstream _where, _message; \
_where << __FILE__ << ':' << __LINE__; \
_message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \
std::cerr << _message.str() << "\nAborting...\n"; \
cudaDeviceReset(); \
exit(1); \
} while(0)
#define checkCUDNN(status) do { \
std::stringstream _error; \
if (status != CUDNN_STATUS_SUCCESS) { \
_error << "CUDNN failure: " << cudnnGetErrorString(status); \
FatalError(_error.str()); \
} \
} while(0)
#define checkCudaErrors(status) do { \
std::stringstream _error; \
if (status != 0) { \
_error << "Cuda failure: " << status; \
FatalError(_error.str()); \
} \
} while(0)
/********************************************************************************************************************/
#include "../include/convolution.h"
#include "../include/max_pool.h"
#include "../include/relu.h"
#include "../include/softmax.h"
#include "../include/sigmoid.h"
/* Utility Functions */
void pprint(float* matrix, int size, int width){
for(int i=0; i<size; i++){
if(i%width==0) std::cout << std::endl;
std::cout << matrix[i] << " ";
}
std::cout << std::endl;
}
/*********************/
void test_convolution()
{
printf("*********** CONV_TEST **************\n");
int WIDTH = 4, HEIGHT = 5, BATCH_SIZE = 1, CHANNELS = 1;
int GPU_ID = 0;
checkCudaErrors(cudaSetDevice(GPU_ID));
float *data, *output;
cudnnHandle_t cudnn;
cublasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
cublasCreate(&cublas);
Conv c(1, CHANNELS, 3, 1, 1, cudnn, cublas,
BATCH_SIZE, WIDTH, HEIGHT, true, true, GPU_ID, d1, d2, true);
cudaMalloc(&data, sizeof(float) * c.input_size);
cudaMalloc(&output, sizeof(float) * c.output_size);
float *cpu_data = (float *)malloc(sizeof(float) * c.input_size);
const float data_[5][4] = {{1, 6, 11, 16},
{2, 7, 12, 17},
{3, 8, 13, 18},
{4, 9, 14, 19},
{5, 10, 15, 20}};
const float data_grad[5][4] = {{0, 0, 0, 0},
{0, 10, 10, 0},
{0, 0, 0, 0},
{0, 10, 10, 0},
{0, 0, 0, 0}};
for(int i = 0;i < HEIGHT;i++)
for(int j = 0;j < WIDTH;j++)
cpu_data[i*WIDTH + j] = data_[i][j];
// for(int i = 0;i < HEIGHT;i++) {
// for(int j = 0;j < WIDTH;j++)
// std::cout << cpu_data[i*WIDTH + j] << " ";
// std::cout << std::endl;
// }
checkCudaErrors(cudaMemcpyAsync(data, cpu_data, sizeof(float) * c.input_size, cudaMemcpyHostToDevice));
c.forward(data, output);
// Move from device to host
float *out = (float *)malloc(sizeof(float) * c.output_size);
checkCudaErrors(cudaMemcpy(out, output, sizeof(float) * c.output_size, cudaMemcpyDeviceToHost));
printf("Output of conv:\n");
pprint(out, c.output_size, WIDTH);
float *grad_above = (float *)malloc(sizeof(float) * c.output_size);
for(int i = 0;i < HEIGHT;i++)
for(int j = 0;j < WIDTH;j++)
grad_above[i*WIDTH + j] = data_grad[i][j];
printf("Grad abovve\n");
pprint(grad_above, c.output_size, WIDTH);
float *d_grad_above;
cudaMalloc(&d_grad_above, sizeof(float) * c.output_size);
checkCudaErrors(cudaMemcpyAsync(d_grad_above, grad_above, sizeof(float) * c.output_size, cudaMemcpyHostToDevice));
c.backward(d_grad_above, c.input_descriptor, data);
int t = c.in_channels * c.kernel_size * c.kernel_size * c.out_channels;
float *grad_kernel = (float *)malloc(sizeof(float) * t);
checkCudaErrors(cudaMemcpy(grad_kernel, c.grad_kernel, sizeof(float) * t, cudaMemcpyDeviceToHost));
std::cout<<"Printing grad_kernels . . .\n";
for(int i = 0;i < t;i++)
std::cout << grad_kernel[i] << " ";
std::cout << std::endl;
t = c.out_channels;
float *grad_bias = (float *)malloc(sizeof(float) * t);
checkCudaErrors(cudaMemcpy(grad_bias, c.grad_bias, sizeof(float) * t, cudaMemcpyDeviceToHost));
std::cout<<"Printing grad_bias . . .\n";
for(int i = 0;i < t;i++)
std::cout << grad_bias[i] << " ";
std::cout << std::endl;
t = BATCH_SIZE * HEIGHT * WIDTH * CHANNELS;
float *grad_data = (float *)malloc(sizeof(float) * t);
checkCudaErrors(cudaMemcpy(grad_data, c.grad_data, sizeof(float) * t, cudaMemcpyDeviceToHost));
std::cout<<"Printing grad_data . . .\n";
pprint(grad_data, c.input_size, WIDTH);
/* for(int i = 0;i < t;i++)
std::cout << grad_data[i] << " ";
std::cout << std::endl;*/
printf("\n");
}
void test_mpl()
{
printf("*********** CONV_POOL TEST **************\n");
// Initialize image and cudnn handles
// std::cout << "-------- TESTING MAX POOL LAYER --------\n";
int WIDTH_CONV = 4, HEIGHT_CONV = 5, KERNEL_SIZE_CONV=3, PADDING_CONV=1, STRIDE_CONV=1; //Input to Conv
int SIZE_MAX_POOL=2, STRIDE_MAX_POOL=2, PADDING_MAX_POOL=0, HEIGHT_MAX_POOL=(HEIGHT_CONV - KERNEL_SIZE_CONV + 2*PADDING_CONV)/STRIDE_CONV + 1, WIDTH_MAX_POOL=(WIDTH_CONV - KERNEL_SIZE_CONV + 2*PADDING_CONV)/STRIDE_CONV + 1; //For MaxPool
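//With these values the standard output-size formula (N - K + 2P)/S + 1 gives
//(5 - 3 + 2*1)/1 + 1 = 5 rows and (4 - 3 + 2*1)/1 + 1 = 4 cols for the conv,
//so HEIGHT_MAX_POOL = 5 and WIDTH_MAX_POOL = 4 match the conv output fed into the pool.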
int BATCH_SIZE = 1, CHANNELS = 1; //Image
int GPU_ID = 0;
//int output_height = (HEIGHT_MAX_POOL - SIZE_MAX_POOL) / 2 + 1;
//int output_width = (WIDTH_MAX_POOL - 2) / 2 + 1;
checkCudaErrors(cudaSetDevice(GPU_ID));
float *data, *output_conv, *output_max_pool, *input_diff_grad, *output_diff_grad;
cudnnHandle_t cudnn;
cublasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
cublasCreate(&cublas);
// Stack Layers
Conv c(1, CHANNELS, KERNEL_SIZE_CONV, PADDING_CONV, STRIDE_CONV, cudnn, cublas, BATCH_SIZE, WIDTH_CONV, HEIGHT_CONV, true, true, GPU_ID, d1, d2, true);
MaxPoolLayer mpl(SIZE_MAX_POOL, STRIDE_MAX_POOL, PADDING_MAX_POOL, BATCH_SIZE, CHANNELS, HEIGHT_MAX_POOL, WIDTH_MAX_POOL, GPU_ID, cudnn, c.output_descriptor, d2, true);
//Initialize tensors device
cudaMalloc(&data, sizeof(float) * c.input_size); //CONV INPUT
cudaMalloc(&output_conv, sizeof(float) * c.output_size); //CONV OUTPUT
cudaMalloc(&output_max_pool, sizeof(float) * mpl.output_size);
//Initialize host arrays
float *cpu_data = (float *)malloc(sizeof(float) * c.input_size);
const float data_[5][4] = {{1, 6, 11, 16},
{2, 7, 12, 17},
{3, 8, 13, 18},
{4, 9, 14, 19},
{5, 10, 15, 20}};
for(int i=0; i<5; i++)
for(int j=0; j<4; j++)
cpu_data[4*i + j] = data_[i][j];
//for(int i = 0;i < c.input_size;i++) cpu_data[i] = 3.0;
checkCudaErrors(cudaMemcpyAsync(data, cpu_data, sizeof(float) * c.input_size, cudaMemcpyHostToDevice));
float* output_matrix = (float *)malloc(sizeof(float)*mpl.output_size);
float* output_matrix_conv = (float *)malloc(sizeof(float)*mpl.input_size);
float* grad_data_conv = (float *)malloc(sizeof(float) * c.input_size);
std::cout << "Input Matrix:";
pprint(cpu_data, c.input_size, WIDTH_CONV);
std::cout << "\nApply Convolution kernel_size=3, padding=1, stride=1:\n";
c.forward(data, output_conv);
checkCudaErrors(cudaMemcpy(output_matrix_conv, output_conv, sizeof(float)*mpl.input_size, cudaMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Convolution:";
pprint(output_matrix_conv, mpl.input_size, mpl.input_width);
std::cout << "\nPerforming max pool size=(2,2), stride=(2, 2), padding=(0, 0)\n";
mpl.forward(output_conv, output_max_pool);
checkCudaErrors(cudaMemcpy(output_matrix, output_max_pool, sizeof(float)*mpl.output_size, cudaMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Max Pool:";
pprint(output_matrix, mpl.output_size, mpl.out_width);
//COMMENT BACKWARD
//Generate an input differential gradient received by the max pool layer in backprop
cudaMalloc(&input_diff_grad, sizeof(float) * mpl.output_size);
cudaMalloc(&output_diff_grad, sizeof(float) * mpl.input_size);
float *input_diff_grad_cpu = (float *)malloc(sizeof(float) * mpl.output_size);
for(int i = 0;i < mpl.output_size;i++) input_diff_grad_cpu[i] = 10.0;
checkCudaErrors(cudaMemcpyAsync(input_diff_grad, input_diff_grad_cpu, sizeof(float) * mpl.output_size, cudaMemcpyHostToDevice));
float* output_gradient = (float *)malloc(sizeof(float)*mpl.input_size);
mpl.backward(output_conv, input_diff_grad, output_max_pool, output_diff_grad/*, c.output_descriptor*/);
checkCudaErrors(cudaMemcpy(output_gradient, output_diff_grad, sizeof(float)*mpl.input_size, cudaMemcpyDeviceToHost));
std::cout << "\nGradient from Max Pool Layer:";
pprint(output_gradient, mpl.input_size, mpl.input_width);
// printf("\n\n\nDone\n\n\n");
// checkCudaErrors(cudaSetDevice(GPU_ID));
std::cout << "\nBackpropping that through conv:\n";
c.backward(output_diff_grad, c.input_descriptor, data);
checkCudaErrors(cudaMemcpy(grad_data_conv, c.grad_data, sizeof(float)*c.input_size, cudaMemcpyDeviceToHost));
printf("\nGrad data conv");
pprint(grad_data_conv, c.input_size, WIDTH_CONV);
float *grad_kernel = (float *)malloc(sizeof(float) * 9);
checkCudaErrors(cudaMemcpy(grad_kernel, c.grad_kernel, sizeof(float) * 9, cudaMemcpyDeviceToHost));
std::cout<<"Printing grad_kernels . . .\n";
for(int i = 0;i < 9;i++)
std::cout << grad_kernel[i] << " ";
std::cout << std::endl;
int t = c.out_channels;
float *grad_bias = (float *)malloc(sizeof(float) * t);
checkCudaErrors(cudaMemcpy(grad_bias, c.grad_bias, sizeof(float) * t, cudaMemcpyDeviceToHost));
std::cout<<"Printing grad_bias . . .\n";
for(int i = 0;i < t;i++)
std::cout << grad_bias[i] << " ";
std::cout << std::endl;
return;
}
void test_relu()
{
printf("*********** RELU TEST **************\n");
int WIDTH = 5, HEIGHT = 5, BATCH_SIZE = 1, CHANNELS = 1;
int GPU_ID = 0;
checkCudaErrors(cudaSetDevice(GPU_ID));
float *data, *output, *dup, *dout;
cudnnHandle_t cudnn;
cublasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
cublasCreate(&cublas);
Relu R(CHANNELS, CHANNELS, cudnn, cublas, BATCH_SIZE, HEIGHT, WIDTH, GPU_ID, d1, d2, true);
cudaMalloc((void **)&data, sizeof(float) * R.input_size);
cudaMalloc((void **)&output, sizeof(float) * R.output_size);
cudaMalloc((void **)&dout, sizeof(float) * R.output_size);
cudaMalloc((void **)&dup, sizeof(float) * R.output_size);
float *cpu_data = (float *)malloc(sizeof(float) * R.input_size);
for(int i = 0; i < R.input_size; i++)
cpu_data[i] = -12.0 + i;
cpu_data[1] = 3234.0; //to check clipping
cpu_data[20] = 3566.0;
std::cout<<"Testing Forward . . ."<<std::endl;
std::cout << "Input Matrix:"<<std::endl;
for(int i=0; i<R.input_size; i++)
{
if(i%WIDTH==0)
std::cout << "\n";
std::cout << cpu_data[i] << " ";
}
std::cout << "\nApply ReLU:"<<std::endl;
checkCudaErrors(cudaMemcpy(data, cpu_data, sizeof(float) * R.input_size, cudaMemcpyHostToDevice));
// std::cout << "\nApply ReLU 2:"<<std::endl;
R.forward(data, output);
float *out = (float *)malloc(sizeof(float) * R.output_size);
checkCudaErrors(cudaMemcpy(out, output, sizeof(float) * R.output_size, cudaMemcpyDeviceToHost));
std::cout << "Output Matrix:"<<std::endl;
for(int i=0; i<R.output_size; i++)
{
if(i%WIDTH==0)
std::cout << "\n";
std::cout << out[i] << " ";
}
std::cout<<std::endl;
std::cout<<"Testing Backward . . ."<<std::endl;
float *cpu_dup = (float *)malloc(sizeof(float) * R.output_size);
for(int i=0; i<R.output_size; i++)
cpu_dup[i] = 100 + i;
std::cout << "Upstream Derivatives:";
for(int i=0; i<R.output_size; i++)
{
if(i%WIDTH==0)
std::cout << "\n";
std::cout << cpu_dup[i] << " ";
}
std::cout<<std::endl;
checkCudaErrors(cudaMemcpy(dup, cpu_dup, sizeof(float) * R.output_size, cudaMemcpyHostToDevice));
std::cout << "\nApply Backward:"<<std::endl;
R.backward(dup, dout);
float *cpu_dout = (float *)malloc(sizeof(float) * R.input_size);
checkCudaErrors(cudaMemcpy(cpu_dout, dout, sizeof(float) * R.input_size, cudaMemcpyDeviceToHost));
std::cout << "Back prop results :"<<std::endl;
for(int i=0; i<R.input_size; i++)
{
if(i%WIDTH==0)
std::cout << "\n";
std::cout << cpu_dout[i] << " ";
}
std::cout<<std::endl;
}
void test_conv_relu_maxpool()
{
printf("*********** CONV_RELU_POOL TEST **************\n");
// Initialize image and cudnn handles
int WIDTH_CONV = 4, HEIGHT_CONV = 5, KERNEL_SIZE_CONV=3, PADDING_CONV=1, STRIDE_CONV=1; //Input to Conv
int SIZE_MAX_POOL=2, STRIDE_MAX_POOL=2, PADDING_MAX_POOL=0; //For MaxPool
int BATCH_SIZE = 1, CHANNELS = 1; //Image
int GPU_ID = 0;
checkCudaErrors(cudaSetDevice(GPU_ID));
float *data, *output_conv, *output_relu, *output_max_pool, *input_diff_grad, *output_diff_grad, *output_diff_grad_relu;
cudnnHandle_t cudnn;
cublasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
cublasCreate(&cublas);
// Stack Layers
Conv c(1, CHANNELS, KERNEL_SIZE_CONV, PADDING_CONV, STRIDE_CONV, cudnn, cublas, BATCH_SIZE, WIDTH_CONV, HEIGHT_CONV, true, true, GPU_ID, d1, d2, true);
Relu R(CHANNELS, CHANNELS, cudnn, cublas, BATCH_SIZE, c.out_height, c.out_width, GPU_ID, c.output_descriptor, d2, false);
MaxPoolLayer mpl(SIZE_MAX_POOL, STRIDE_MAX_POOL, PADDING_MAX_POOL, BATCH_SIZE, CHANNELS, R.out_height, R.out_width, GPU_ID, cudnn, R.output_descriptor, d2, false);
//Initialize tensors device
cudaMalloc(&data, sizeof(float) * c.input_size); //CONV INPUT
cudaMalloc(&output_conv, sizeof(float) * c.output_size); //CONV OUTPUT-RELU INPUT
cudaMalloc(&output_relu, sizeof(float) * R.output_size); //RELU OUTPUT-MAXPOOL INPUT
cudaMalloc(&output_max_pool, sizeof(float) * mpl.output_size); //MAXPOOL OUTPUT
//Initialize host arrays
float *cpu_data = (float *)malloc(sizeof(float) * c.input_size);
const float data_[5][4] = {{1, 6, 11, 16},
{2, 7, 12, 17},
{3, 8, 13, 18},
{4, 9, 14, 19},
{5, 10, 15, 20}};
for(int i=0; i<5; i++)
for(int j=0; j<4; j++)
cpu_data[4*i + j] = data_[i][j];
//for(int i = 0;i < c.input_size;i++) cpu_data[i] = 3.0;
checkCudaErrors(cudaMemcpyAsync(data, cpu_data, sizeof(float) * c.input_size, cudaMemcpyHostToDevice));
float* output_matrix_conv = (float *)malloc(sizeof(float)*c.output_size);
float* output_matrix_relu = (float *)malloc(sizeof(float)*R.output_size);
float* output_matrix = (float *)malloc(sizeof(float)*mpl.output_size);
float* grad_data_conv = (float *)malloc(sizeof(float) * c.input_size);
std::cout << "Input Matrix:";
pprint(cpu_data, c.input_size, WIDTH_CONV);
std::cout << "\nApply Convolution kernel_size=3, padding=1, stride=1:\n";
c.forward(data, output_conv);
checkCudaErrors(cudaMemcpy(output_matrix_conv, output_conv, sizeof(float)*mpl.input_size, cudaMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Convolution:";
pprint(output_matrix_conv, c.output_size, mpl.input_width);
std::cout << "\nApply Relu:\n";
R.forward(output_conv, output_relu);
checkCudaErrors(cudaMemcpy(output_matrix_relu, output_relu, sizeof(float)*R.output_size, cudaMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Relu:";
pprint(output_matrix_relu, R.output_size, R.out_width);
std::cout << "\nPerforming max pool size=(2,2), stride=(2, 2), padding=(0, 0)\n";
mpl.forward(output_relu, output_max_pool);
checkCudaErrors(cudaMemcpy(output_matrix, output_max_pool, sizeof(float)*mpl.output_size, cudaMemcpyDeviceToHost));
std::cout << "\nOutput Matrix From Max Pool:";
pprint(output_matrix, mpl.output_size, mpl.out_width);
// 'COMMENT BACKWARD'
//Generate an input differential gradient received by the max pool layer in backprop
cudaMalloc(&input_diff_grad, sizeof(float) * mpl.output_size);
cudaMalloc(&output_diff_grad, sizeof(float) * mpl.input_size);
cudaMalloc(&output_diff_grad_relu, sizeof(float) * R.input_size);
float *input_diff_grad_cpu = (float *)malloc(sizeof(float) * mpl.output_size);
for(int i = 0;i < mpl.output_size;i++) input_diff_grad_cpu[i] = 10.0;
checkCudaErrors(cudaMemcpyAsync(input_diff_grad, input_diff_grad_cpu, sizeof(float) * mpl.output_size, cudaMemcpyHostToDevice));
float* output_gradient = (float *)malloc(sizeof(float)*mpl.input_size);
mpl.backward(output_conv, input_diff_grad, output_max_pool, output_diff_grad);
checkCudaErrors(cudaMemcpy(output_gradient, output_diff_grad, sizeof(float)*mpl.input_size, cudaMemcpyDeviceToHost));
std::cout << "\nGradient from Max Pool Layer:";
pprint(output_gradient, mpl.input_size, mpl.input_width);
std::cout << "\nBackpropping that through relu:";
R.backward(output_diff_grad, output_diff_grad_relu);
output_gradient = (float *)malloc(sizeof(float)*R.input_size);
checkCudaErrors(cudaMemcpy(output_gradient, output_diff_grad_relu, sizeof(float)*R.input_size, cudaMemcpyDeviceToHost));
std::cout << "\nGradient from Relu Layer:";
pprint(output_gradient, R.input_size, R.input_width);
std::cout << "\nBackpropping that through conv:\n";
c.backward(output_diff_grad_relu, c.input_descriptor, data);
checkCudaErrors(cudaMemcpy(grad_data_conv, c.grad_data, sizeof(float)*c.input_size, cudaMemcpyDeviceToHost));
printf("\nGrad data conv: ");
pprint(grad_data_conv, c.input_size, WIDTH_CONV);
float *grad_kernel = (float *)malloc(sizeof(float) * 9);
checkCudaErrors(cudaMemcpy(grad_kernel, c.grad_kernel, sizeof(float) * 9, cudaMemcpyDeviceToHost));
std::cout<<"Printing grad_kernels . . .\n";
for(int i = 0;i < 9;i++)
std::cout << grad_kernel[i] << " ";
std::cout << std::endl;
int t = c.out_channels;
float *grad_bias = (float *)malloc(sizeof(float) * t);
checkCudaErrors(cudaMemcpy(grad_bias, c.grad_bias, sizeof(float) * t, cudaMemcpyDeviceToHost));
std::cout<<"Printing grad_bias . . .\n";
for(int i = 0;i < t;i++)
std::cout << grad_bias[i] << " ";
std::cout << std::endl;
}
void test_sigmoid()
{
int WIDTH = 5, HEIGHT = 5, BATCH_SIZE = 1, CHANNELS = 1;
int GPU_ID = 0;
checkCudaErrors(cudaSetDevice(GPU_ID));
float *data, *output, *dup, *dout;
cudnnHandle_t cudnn;
cublasHandle_t cublas;
cudnnTensorDescriptor_t d1, d2; // dummy descriptors
cudnnCreate(&cudnn);
cublasCreate(&cublas);
Sigmoid R(CHANNELS, CHANNELS, cudnn, cublas, BATCH_SIZE, HEIGHT, WIDTH, GPU_ID, d1, d2, true);
cudaMalloc((void **)&data, sizeof(float) * R.input_size);
cudaMalloc((void **)&output, sizeof(float) * R.output_size);
cudaMalloc((void **)&dout, sizeof(float) * R.output_size);
cudaMalloc((void **)&dup, sizeof(float) * R.output_size);
float *cpu_data = (float *)malloc(sizeof(float) * R.input_size);
for(int i = 0; i < R.input_size; i++)
cpu_data[i] = -12.0 + i;
cpu_data[1] = 3234.0; //to check clipping
cpu_data[20] = 3566.0;
// std::cout<<"Testing Forward . . ."<<std::endl;
// std::cout << "Input Matrix:"<<std::endl;
// for(int i=0; i<R.input_size; i++)
// {
// if(i%WIDTH==0)
// std::cout << "\n";
// std::cout << cpu_data[i] << " ";
// }
// std::cout << "\nApply ReLU:"<<std::endl;
checkCudaErrors(cudaMemcpy(data, cpu_data, sizeof(float) * R.input_size, cudaMemcpyHostToDevice));
std::cout << "\nApply Sigmoid\n\n\n:"<<std::endl;
R.forward(data, output);
float *out = (float *)malloc(sizeof(float) * R.output_size);
checkCudaErrors(cudaMemcpy(out, output, sizeof(float) * R.output_size, cudaMemcpyDeviceToHost));
// std::cout << "Output Matrix:"<<std::endl;
// for(int i=0; i<R.output_size; i++)
// {
// if(i%WIDTH==0)
// std::cout << "\n";
// std::cout << out[i] << " ";
// }
// std::cout<<std::endl;
// std::cout<<"Testing Backward . . ."<<std::endl;
float *cpu_dup = (float *)malloc(sizeof(float) * R.output_size);
for(int i=0; i<R.output_size; i++)
cpu_dup[i] = 100 + i;
// std::cout << "Upstream Derivatives:";
// for(int i=0; i<R.output_size; i++)
// {
// if(i%WIDTH==0)
// std::cout << "\n";
// std::cout << cpu_dup[i] << " ";
// }
// std::cout<<std::endl;
checkCudaErrors(cudaMemcpy(dup, cpu_dup, sizeof(float) * R.output_size, cudaMemcpyHostToDevice));
// std::cout << "\nApply Backward:"<<std::endl;
R.backward(dup, dout);
float *cpu_dout = (float *)malloc(sizeof(float) * R.input_size);
checkCudaErrors(cudaMemcpy(cpu_dout, dout, sizeof(float) * R.input_size, cudaMemcpyDeviceToHost));
std::cout << "Done\n\n\n";
// std::cout << "Back prop results :"<<std::endl;
// for(int i=0; i<R.input_size; i++)
// {
// if(i%WIDTH==0)
// std::cout << "\n";
// std::cout << cpu_dout[i] << " ";
// }
// std::cout<<std::endl;
}
void test() {
// Tests both backward and forward
test_convolution();
printf("\n\n\n----------Convolution Test Passed!-----------\n\n\n");
test_mpl();
printf("\n\n\n----------Max-Pooling Test Passed!-----------\n\n\n");
test_relu();
printf("\n\n\n----------Relu Test Passed!-----------\n\n\n");
test_conv_relu_maxpool();
printf("\n\n\n----------Conv Relu Maxpool Test Passed!-----------\n\n\n");
// test_sigmoid();
// printf("\n\n\n----------Sigmoid Test Passed!-----------\n\n\n");
}
int main() {
test();
printf("Out\n\n\n");
return 0;
}
|
3481a05252cb7900dde3523f7fcd5999b360edaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <chrono>
#include <fstream>
#include <getopt.h>
#include <iostream>
#include <string>
#include <hip/hip_fp16.h>
#include <hiprand/hiprand_kernel.h>
#include <rocblas.h>
#include <hipcub/hipcub.hpp>
#include "cudamacro.h"
#define LATTICE_SUP_N (256)
#define LATTICE_SUB_N (LATTICE_SUP_N / 2)
#define TCRIT 2.26918531421f
#define THREADS (LATTICE_SUB_N)
#define SUP_OFFSET(i,j,nbx) (((j)*(long long)(nbx) + (i))*LATTICE_SUP_N*LATTICE_SUP_N)
#define SUB_OFFSET(i,j) (((j)*LATTICE_SUP_N + (i)*LATTICE_SUB_N)*LATTICE_SUB_N)
#define SUB_ELEM(i,j) ((j)*LATTICE_SUB_N + (i))
#define CUB_CHUNK_SIZE ((1ll<<31) - (1ll<<28))
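// Memory layout: the lattice is tiled into LATTICE_SUP_N x LATTICE_SUP_N super-blocks,
// each split into four contiguous LATTICE_SUB_N x LATTICE_SUB_N sub-blocks.
// SUP_OFFSET(i,j,nbx) locates super-block (i,j) in an nbx-wide grid, SUB_OFFSET(i,j)
// selects one of its four sub-blocks, and SUB_ELEM(i,j) indexes a spin within a sub-block.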
__global__ void set_k(__half* k, __half* kT) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int i = tid % LATTICE_SUB_N;
const int j = tid / LATTICE_SUB_N;
if (j >= LATTICE_SUB_N) return;
__half val = __float2half(0.0f);
if (i == j || i + 1 == j) {
val = __float2half(1.0f);
}
k[j*LATTICE_SUB_N + i] = val;
kT[i*LATTICE_SUB_N + j] = val;
}
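// K is upper bidiagonal (ones on the main diagonal and first superdiagonal), so a
// half-precision GEMM with k sums each spin with one column neighbor and a GEMM with
// kT sums it with one row neighbor -- the nearest-neighbor sums become tensor-core
// matrix products, with sub-block edges patched up afterwards by add_boundaries.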
__global__ void init_spins(__half* lattice,
const unsigned long long seed,
const int nbx,
const int nby,
const long long offset) {
const long long tid = static_cast<long long>(blockDim.x) * blockIdx.x + threadIdx.x + offset;
const long long nx = nbx * LATTICE_SUP_N;
const long long ny = nby * LATTICE_SUP_N;
if (tid >= nx * ny) return;
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed, tid, 0, &state);
float randval = hiprand_uniform(&state);
__half val = (randval < 0.5f) ? __float2half(-1.0f) : __float2half(1.0f);
lattice[tid] = val;
}
template <int N>
struct __align__(sizeof(__half)*N) halfn {
__half val[N];
};
#define NLOOPS 2
#define SPINSPERTHREAD 8
template<bool is_black>
__global__ void update_spins(__half* lattice,
float inv_temp,
const __half* __restrict__ nn_sums,
const unsigned long long seed,
const unsigned long long iter,
const int nbx,
const int nby,
const long long offset) {
const long long tid = static_cast<long long>(blockDim.x) * blockIdx.x + threadIdx.x + offset;
const int threads_per_subblock = LATTICE_SUB_N * LATTICE_SUB_N / (NLOOPS * SPINSPERTHREAD);
int bi = tid / threads_per_subblock % (2 * nbx);
int bj = tid / (threads_per_subblock * 2 * nbx);
// subblock local thread idx
int tl = tid % threads_per_subblock;
if (bj >= nby) return;
// Offset threads depending on parity and color
if (is_black) {
if (bi % 2) {
bj = 2*bj + 1;
} else {
bj = 2*bj;
}
} else {
if (bi % 2) {
bj = 2*bj;
} else {
bj = 2*bj + 1;
}
}
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed, tid, iter, &state);
#pragma unroll
for (int n = 0; n < NLOOPS; n++) {
size_t elem_offset = SUP_OFFSET(bi/2, bj/2, nbx) + SUB_OFFSET(bi%2, bj%2) + (tl + n * threads_per_subblock) * SPINSPERTHREAD;
halfn<SPINSPERTHREAD> lij = *(reinterpret_cast<halfn<SPINSPERTHREAD>*>(lattice + elem_offset));
const halfn<SPINSPERTHREAD> nn = *(reinterpret_cast<const halfn<SPINSPERTHREAD>*>(nn_sums + elem_offset));
#pragma unroll
for (int m = 0; m < SPINSPERTHREAD; m++) {
float randval = hiprand_uniform(&state);
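      // Metropolis step: flipping spin s in local field h changes the energy by
      // 2*s*h, so the flip is accepted with probability min(1, exp(-2*beta*s*h)).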
float accept = exp(-2.0f * inv_temp * __half2float(nn.val[m] * lij.val[m]));
if (randval < accept) {
lij.val[m] = -lij.val[m];
}
}
*reinterpret_cast<halfn<SPINSPERTHREAD>*>(lattice + elem_offset) = lij;
}
}
template<bool is_black>
__global__ void add_boundaries(const __half* __restrict__ lattice,
__half* nn_sums,
const int nbx,
const int nby,
const long long offset) {
const long long tid = static_cast<long long>(blockDim.x) * blockIdx.x + threadIdx.x + offset;
// subblock i,j (1 thread block per subblock)
int bi = tid / LATTICE_SUB_N % (2 * nbx);
int bj = tid / (LATTICE_SUB_N * 2 * nbx);
// subblock local i
int il = tid % LATTICE_SUB_N;
if (bj >= nby) return;
// Offset threads depending on parity and color
int jl, jb;
if (is_black) {
if (bi % 2) {
bj = 2*bj + 1;
jl = LATTICE_SUB_N - 1;
jb = 0;
} else {
bj = 2*bj;
jl = 0;
jb = LATTICE_SUB_N - 1;
}
} else {
if (bi % 2) {
bj = 2*bj;
jl = 0;
jb = LATTICE_SUB_N - 1;
} else {
bj = 2*bj + 1;
jl = LATTICE_SUB_N - 1;
jb = 0;
}
}
int bn = 2*nbx;
int bm = 2*nby;
int bin = (bi - 1 >= 0) ? bi - 1 : bn - 1;
int bip = (bi + 1 < bn) ? bi + 1 : 0;
int bjn = (bj - 1 >= 0) ? bj - 1 : bm - 1;
int bjp = (bj + 1 < bm) ? bj + 1 : 0;
// Update LR
size_t boundary_offset;
if (jl == 0) {
boundary_offset = SUP_OFFSET(bi/2, bjn/2, nbx) + SUB_OFFSET(bi%2, bjn%2);
} else {
boundary_offset = SUP_OFFSET(bi/2, bjp/2, nbx) + SUB_OFFSET(bi%2, bjp%2);
}
size_t local_offset = SUP_OFFSET(bi/2, bj/2, nbx) + SUB_OFFSET(bi%2, bj%2);
*(nn_sums + local_offset + SUB_ELEM(il, jl)) += *(lattice + boundary_offset + SUB_ELEM(il, jb));
// Update UD
if (!is_black) {
jl = (jl == 0) ? LATTICE_SUB_N - 1 : 0;
jb = (jb == 0) ? LATTICE_SUB_N - 1 : 0;
}
if (jl == 0) {
boundary_offset = SUP_OFFSET(bin/2, bj/2, nbx) + SUB_OFFSET(bin%2, bj%2);
} else {
boundary_offset = SUP_OFFSET(bip/2, bj/2, nbx) + SUB_OFFSET(bip%2, bj%2);
}
__half bval = *(lattice + boundary_offset + SUB_ELEM(jb, il));
__syncthreads();
*(nn_sums + local_offset + SUB_ELEM(jl, il)) += bval;
}
void sync(int nGPUs) {
// Sync all devices
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(hipSetDevice(dev));
CHECK_CUDA(hipDeviceSynchronize());
}
}
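// Illustrative summary (added commentary, not from the original source): one
// sweep in update() processes the black and then the white checkerboard
// sub-lattices. For each color, per-sub-block nearest-neighbor sums are built
// by two batched half-precision GEMMs (e.g. nn(0,0) = lat(0,1) x K +
// K^T x lat(1,0), see the pointer setup in main()), add_boundaries patches the
// periodic wrap-around terms, and update_spins then applies the Metropolis rule.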
void update(__half **Ab0, __half **Bb0, __half **Ab1, __half **Bb1, __half **Cb,
__half **Aw0, __half **Bw0, __half **Aw1, __half **Bw1, __half **Cw,
__half *lattice, float inv_temp, __half *nn_sums, hipblasHandle_t *cublas_handles, int iter,
int nbx, int nby, unsigned long long seed, int nGPUs) {
int batchCount = 2 * nbx * nby;
int batchCountPerGPU = batchCount / nGPUs;
__half alpha = __float2half(1.0f);
__half beta0 = __float2half(0.0f);
__half beta1 = __float2half(1.0f);
// Update black
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(hipSetDevice(dev));
CHECK_CUBLAS(hipblasGemmBatchedEx(cublas_handles[dev], HIPBLAS_OP_N, HIPBLAS_OP_N, LATTICE_SUB_N, LATTICE_SUB_N, LATTICE_SUB_N,
&alpha, (void**) &Ab0[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N,
(void**) &Bb0[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N, &beta0,
(void**) &Cb[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N, batchCountPerGPU,
HIP_R_16F, CUBLAS_GEMM_ALGO0_TENSOR_OP));
CHECK_CUBLAS(hipblasGemmBatchedEx(cublas_handles[dev], HIPBLAS_OP_N, HIPBLAS_OP_N, LATTICE_SUB_N, LATTICE_SUB_N, LATTICE_SUB_N,
&alpha, (void**) &Ab1[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N,
(void**) &Bb1[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N, &beta1,
(void**) &Cb[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N, batchCountPerGPU,
HIP_R_16F, CUBLAS_GEMM_ALGO0_TENSOR_OP));
int blocks = (2 * nbx * nby);
int blocksPerGPU = blocks / nGPUs;
hipLaunchKernelGGL(( add_boundaries<true>), dim3(blocksPerGPU), dim3(THREADS), 0, 0, lattice, nn_sums, nbx, nby, dev * ((long long)blocksPerGPU * THREADS));
blocks = (2 * nbx * nby * LATTICE_SUB_N) / (NLOOPS * SPINSPERTHREAD);
blocksPerGPU = blocks / nGPUs;
hipLaunchKernelGGL(( update_spins<true>), dim3(blocksPerGPU), dim3(THREADS), 0, 0, lattice, inv_temp, nn_sums, seed, (2*iter) * (NLOOPS * SPINSPERTHREAD), nbx, nby, dev * ((long long)blocksPerGPU * THREADS));
}
sync(nGPUs);
// Update white
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(hipSetDevice(dev));
CHECK_CUBLAS(hipblasGemmBatchedEx(cublas_handles[dev], HIPBLAS_OP_N, HIPBLAS_OP_N, LATTICE_SUB_N, LATTICE_SUB_N, LATTICE_SUB_N,
&alpha, (void**) &Aw0[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N,
(void**) &Bw0[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N, &beta0,
(void**) &Cw[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N, batchCountPerGPU,
HIP_R_16F, CUBLAS_GEMM_ALGO0_TENSOR_OP));
CHECK_CUBLAS(hipblasGemmBatchedEx(cublas_handles[dev], HIPBLAS_OP_N, HIPBLAS_OP_N, LATTICE_SUB_N, LATTICE_SUB_N, LATTICE_SUB_N,
&alpha, (void**) &Aw1[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N,
(void**) &Bw1[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N, &beta1,
(void**) &Cw[dev * batchCountPerGPU], HIP_R_16F, LATTICE_SUB_N, batchCountPerGPU,
HIP_R_16F, CUBLAS_GEMM_ALGO0_TENSOR_OP));
int blocks = (2 * nbx * nby);
int blocksPerGPU = blocks / nGPUs;
hipLaunchKernelGGL(( add_boundaries<false>), dim3(blocksPerGPU), dim3(THREADS), 0, 0, lattice, nn_sums, nbx, nby, dev * ((long long)blocksPerGPU * THREADS));
blocks = (2 * nbx * nby * LATTICE_SUB_N) / (NLOOPS * SPINSPERTHREAD);
blocksPerGPU = blocks / nGPUs;
hipLaunchKernelGGL(( update_spins<false>), dim3(blocksPerGPU), dim3(THREADS), 0, 0, lattice, inv_temp, nn_sums, seed, (2*iter + 1) * (NLOOPS * SPINSPERTHREAD), nbx, nby, dev * ((long long)blocksPerGPU * THREADS));
}
sync(nGPUs);
}
void write_lattice(__half *lattice, std::string filename, int nbx, int nby, int nGPUs) {
printf("Writing lattice to %s...\n", filename.c_str());
long long nx = nbx * LATTICE_SUP_N;
long long ny = nby * LATTICE_SUP_N;
__half* lattice_h;
float* lattice_true_h;
lattice_h = (__half*) malloc(nx * ny * sizeof(*lattice_h));
lattice_true_h = (float*) malloc(nx * ny * sizeof(*lattice_true_h));
long long spinsPerGPU = nx * (ny/nGPUs);
// Copy out full lattice to host
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(hipSetDevice(dev));
CHECK_CUDA(hipMemcpy(&lattice_h[dev * spinsPerGPU], &lattice[dev * spinsPerGPU], spinsPerGPU * sizeof(*lattice_h), hipMemcpyDeviceToHost));
}
// Write file
for (int bj = 0; bj < nby; bj++) {
for (int bi = 0; bi < nbx; bi++) {
__half* l00 = lattice_h + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 0);
__half* l01 = lattice_h + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 1);
__half* l10 = lattice_h + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 0);
__half* l11 = lattice_h + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 1);
long long offset = (bj * LATTICE_SUP_N) * nx + (bi * LATTICE_SUP_N);
for(int j = 0; j < LATTICE_SUB_N; j++) {
for(int i = 0; i < LATTICE_SUB_N; i++) {
lattice_true_h[offset + (2*j) * nx + (2*i)] = __half2float(*(l00 + SUB_ELEM(i, j)));
lattice_true_h[offset + (2*j + 1) * nx + (2*i + 1)] = __half2float(*(l11 + SUB_ELEM(i, j)));
lattice_true_h[offset + (2*j) * nx + (2*i + 1)] = __half2float(*(l10 + SUB_ELEM(i, j)));
lattice_true_h[offset + (2*j + 1) * nx + (2*i)] = __half2float(*(l01 + SUB_ELEM(i, j)));
}
}
}
}
std::ofstream f;
f.open(filename);
if (f.is_open()) {
for (long long j = 0; j < ny; j++) {
for (long long i = 0; i < nx; i++) {
f << lattice_true_h[j * nx + i] << " ";
}
f << std::endl;
}
}
f.close();
free(lattice_h);
free(lattice_true_h);
}
static void usage(const char *pname) {
const char *bname = rindex(pname, '/');
if (!bname) {bname = pname;}
else {bname++;}
fprintf(stdout,
"Usage: %s [options]\n"
"options:\n"
"\t-x|--lattice-nbx <LATTICE_NBX>\n"
"\t\tnumber of blocks along lattice rows (number of rows / 256)\n"
"\n"
"\t-y|--lattice-nby <LATTICE_NBY>\n"
"\t\tnumber of blocks along lattice columns (number of columns / 256)\n"
"\n"
"\t-g|--ngpus <NGPUS>\n"
"\t\tnumber of GPUs to use for simulation\n"
"\n"
"\t-w|--nwarmup <NWARMUP>\n"
"\t\tnumber of warmup iterations\n"
"\n"
"\t-n|--niters <NITERS>\n"
"\t\tnumber of trial iterations\n"
"\n"
"\t-a|--alpha <ALPHA>\n"
"\t\tcoefficient of critical temperature\n"
"\n"
"\t-s|--seed <SEED>\n"
"\t\tseed for random number generation\n"
"\n"
"\t-o|--write-lattice\n"
"\t\twrite final lattice configuration to file\n\n",
bname);
exit(EXIT_SUCCESS);
}
int main(int argc, char **argv) {
// Defaults
int nbx = 10; // Lattice rows dimension (in number of super blocks)
int nby = 10; // Lattice columns dimension (in number of super blocks)
float alpha = 0.1f; // coefficient of critical temperature
int niter = 1000;
int nwarmup = 100;
bool write = false;
int nGPUs = 1;
unsigned long long seed = 1234ULL;
while (1) {
static struct option long_options[] = {
{ "lattice-nbx", required_argument, 0, 'x'},
{ "lattice-nby", required_argument, 0, 'y'},
{ "ngpus", required_argument, 0, 'g'},
{ "seed", required_argument, 0, 's'},
{ "nwarmup", required_argument, 0, 'w'},
{ "niter", required_argument, 0, 'n'},
{ "write-lattice", no_argument, 0, 'o'},
{ "help", no_argument, 0, 'h'},
{ 0, 0, 0, 0}
};
int option_index = 0;
int ch = getopt_long(argc, argv, "x:y:g:a:s:w:n:oh", long_options, &option_index);
if (ch == -1) break;
switch(ch) {
case 0:
break;
case 'x':
nbx = atoi(optarg); break;
case 'y':
nby = atoi(optarg); break;
case 'g':
nGPUs = atoi(optarg); break;
case 'a':
alpha = atof(optarg); break;
case 's':
seed = atoll(optarg); break;
case 'w':
nwarmup = atoi(optarg); break;
case 'n':
niter = atoi(optarg); break;
case 'o':
write = true; break;
case 'h':
usage(argv[0]); break;
case '?':
exit(EXIT_FAILURE);
default:
fprintf(stderr, "unknown option: %c\n", ch);
exit(EXIT_FAILURE);
}
}
if (nby % nGPUs != 0) {
fprintf(stderr, "ERROR: Number of super blocks in y dimension must be multiple of number of gpus.\n");
exit(EXIT_FAILURE);
}
long long nx = nbx * LATTICE_SUP_N;
long long ny = nby * LATTICE_SUP_N;
__half* lattice;
__half* nn_sums;
__half* k;
__half* kT;
CHECK_CUDA(hipMallocManaged(&lattice, nx * ny * sizeof(*lattice)));
CHECK_CUDA(hipMallocManaged(&nn_sums, nx * ny * sizeof(*nn_sums)));
CHECK_CUDA(hipMallocManaged(&k, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*k)));
CHECK_CUDA(hipMallocManaged(&kT, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*kT)));
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(hipMemAdvise(k, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*k), hipMemAdviseSetReadMostly, dev));
CHECK_CUDA(hipMemAdvise(k, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*k), hipMemAdviseSetAccessedBy, dev));
CHECK_CUDA(hipMemAdvise(kT, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*kT), hipMemAdviseSetReadMostly, dev));
CHECK_CUDA(hipMemAdvise(kT, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*kT), hipMemAdviseSetAccessedBy, dev));
}
long long spinsPerGPU = nx * ny / nGPUs;
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(hipMemAdvise(&lattice[dev * spinsPerGPU], spinsPerGPU * sizeof(*lattice), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&nn_sums[dev * spinsPerGPU], spinsPerGPU * sizeof(*nn_sums), hipMemAdviseSetPreferredLocation, dev));
}
hipblasHandle_t* cublas_handles;
cublas_handles = (hipblasHandle_t*) malloc(nGPUs * sizeof(hipblasHandle_t));
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(hipSetDevice(dev));
CHECK_CUBLAS(hipblasCreate(&cublas_handles[dev]));
CHECK_CUBLAS(cublasSetMathMode(cublas_handles[dev], CUBLAS_TENSOR_OP_MATH));
}
// Setup k and k transpose matrices
CHECK_CUDA(hipSetDevice(0));
int blocks = (LATTICE_SUB_N * LATTICE_SUB_N + THREADS - 1) / THREADS;
hipLaunchKernelGGL(( set_k), dim3(blocks), dim3(THREADS), 0, 0, k, kT);
// Initialize lattice spins randomly
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(hipSetDevice(dev));
blocks = (nx * ny + THREADS - 1) / THREADS;
int blocksPerGPU = blocks/nGPUs;
hipLaunchKernelGGL(( init_spins), dim3(blocksPerGPU), dim3(THREADS), 0, 0, lattice, seed, nbx, nby, dev * nx * (ny/nGPUs));
}
sync(nGPUs);
// Setup pointers for batched GEMMS
__half **Ab0, **Bb0;
__half **Ab1, **Bb1;
__half **Aw0, **Bw0;
__half **Aw1, **Bw1;
__half **Cb, **Cw;
int batchCount = 2 * (nbx * nby);
int batchCountPerGPU = batchCount / nGPUs;
CHECK_CUDA(hipMallocManaged(&Ab0, batchCount * sizeof(*Ab0)));
CHECK_CUDA(hipMallocManaged(&Bb0, batchCount * sizeof(*Bb0)));
CHECK_CUDA(hipMallocManaged(&Ab1, batchCount * sizeof(*Ab1)));
CHECK_CUDA(hipMallocManaged(&Bb1, batchCount * sizeof(*Bb1)));
CHECK_CUDA(hipMallocManaged(&Aw0, batchCount * sizeof(*Aw0)));
CHECK_CUDA(hipMallocManaged(&Bw0, batchCount * sizeof(*Bw0)));
CHECK_CUDA(hipMallocManaged(&Aw1, batchCount * sizeof(*Aw1)));
CHECK_CUDA(hipMallocManaged(&Bw1, batchCount * sizeof(*Bw1)));
CHECK_CUDA(hipMallocManaged(&Cb, batchCount * sizeof(*Cb)));
CHECK_CUDA(hipMallocManaged(&Cw, batchCount * sizeof(*Cw)));
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(hipMemAdvise(&Ab0[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Ab0), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&Bb0[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Bb0), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&Ab1[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Ab1), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&Bb1[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Bb1), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&Aw0[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Aw0), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&Bw0[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Bw0), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&Aw1[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Aw1), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&Bw1[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Bw1), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&Cb[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Cb), hipMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(hipMemAdvise(&Cw[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Cw), hipMemAdviseSetPreferredLocation, dev));
}
int idx = 0;
for (int bj = 0; bj < nby; bj++) {
for (int bi = 0; bi < nbx; bi++) {
__half* nn_sums00 = nn_sums + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 0);
__half* nn_sums11 = nn_sums + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 1);
__half* nn_sums01 = nn_sums + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 1);
__half* nn_sums10 = nn_sums + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 0);
__half* lat00 = lattice + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 0);
__half* lat11 = lattice + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 1);
__half* lat01 = lattice + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 1);
__half* lat10 = lattice + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 0);
// Black:
//nn_sum(0,0) = lattice(0,1) x K + K^T x lattice(1,0)
//nn_sum(1,1) = lattice(1,0) x K^T + K x lattice(0,1)
Ab0[idx ] = lat01; Bb0[idx ] = k;
Ab0[idx+1] = lat10; Bb0[idx+1] = kT;
Ab1[idx ] = kT; Bb1[idx ] = lat10;
Ab1[idx+1] = k; Bb1[idx+1] = lat01;
Cb[idx ] = nn_sums00;
Cb[idx+1] = nn_sums11;
// White:
//nn_sum(1,0) = lattice(1,1) x K + K x lattice(0,0)
//nn_sum(0,1) = lattice(0,0) x K^T + K^T x lattice(1,1)
Aw0[idx ] = lat00; Bw0[idx ] = kT;
Aw0[idx+1] = lat11; Bw0[idx+1] = k;
Aw1[idx ] = kT; Bw1[idx ] = lat11;
Aw1[idx+1] = k; Bw1[idx+1] = lat00;
Cw[idx ] = nn_sums01;
Cw[idx+1] = nn_sums10;
idx += 2;
}
}
sync(nGPUs);
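// Note (added commentary, not from the original source): TCRIT is the exact
// critical temperature of the 2D Ising model, T_c = 2 / ln(1 + sqrt(2))
// ~= 2.26918531421, so the next line sets beta = 1 / (alpha * T_c) with
// J = k_B = 1.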
float inv_temp = 1.0f / (alpha*TCRIT);
// Warmup
printf("Starting warmup...\n");
for (int n = 0; n < nwarmup; n++) {
update(Ab0, Bb0, Ab1, Bb1, Cb, Aw0, Bw0, Aw1, Bw1, Cw,
lattice, inv_temp, nn_sums, cublas_handles, n+1, nbx, nby, seed, nGPUs);
}
sync(nGPUs);
printf("Starting trial iterations...\n");
auto t0 = std::chrono::high_resolution_clock::now();
for (int n = nwarmup; n < niter + nwarmup; n++) {
update(Ab0, Bb0, Ab1, Bb1, Cb, Aw0, Bw0, Aw1, Bw1, Cw,
lattice, inv_temp, nn_sums, cublas_handles, n+1, nbx, nby, seed, nGPUs);
if ((n - nwarmup) % 1000 == 0) printf("Completed %d/%d iterations...\n", n - nwarmup + 1, niter);
}
sync(nGPUs);
auto t1 = std::chrono::high_resolution_clock::now();
double duration = (double) std::chrono::duration_cast<std::chrono::microseconds>(t1-t0).count();
printf("REPORT:\n");
printf("\tnGPUs: %d\n", nGPUs);
printf("\ttemperature: %f * %f\n", alpha, TCRIT);
printf("\tseed: %llu\n", seed);
printf("\twarmup iterations: %d\n", nwarmup);
printf("\ttrial iterations: %d\n", niter);
printf("\tlattice dimensions: %lld x %lld\n", nx, ny);
printf("\telapsed time: %f sec\n", duration * 1e-6);
printf("\tupdates per ns: %f\n", (double) (nx * ny) * niter / duration * 1e-3);
// Compute average magnetism
double* devsums;
int nchunks = (spinsPerGPU + CUB_CHUNK_SIZE - 1)/ CUB_CHUNK_SIZE;
CHECK_CUDA(hipMallocManaged(&devsums, nGPUs * nchunks * sizeof(*devsums)));
for (int dev = 0 ; dev < nGPUs; dev++) {
CHECK_CUDA(hipSetDevice(dev));
size_t cub_workspace_bytes = 0;
void* workspace = NULL;
CHECK_CUDA(hipcub::DeviceReduce::Sum(workspace, cub_workspace_bytes, &lattice[dev * spinsPerGPU], &devsums[dev*nchunks], CUB_CHUNK_SIZE));
CHECK_CUDA(hipMalloc(&workspace, cub_workspace_bytes));
for (int n = 0; n < nchunks; n++) {
CHECK_CUDA(hipcub::DeviceReduce::Sum(workspace, cub_workspace_bytes, &lattice[dev * spinsPerGPU + n*CUB_CHUNK_SIZE],
&devsums[dev * nchunks + n], ::min((long long) CUB_CHUNK_SIZE, spinsPerGPU - n * CUB_CHUNK_SIZE)));
}
CHECK_CUDA(hipFree(workspace));
}
sync(nGPUs);
double hostsum = 0;
for (int n = 0; n < nGPUs * nchunks; n++) {
hostsum += devsums[n];
}
std::cout << "\taverage magnetism (absolute): " << abs(hostsum / (nx * ny)) << std::endl;
CHECK_CUDA(hipFree(devsums));
if (write) write_lattice(lattice, "final.txt", nbx, nby, nGPUs);
return 0;
}
| 3481a05252cb7900dde3523f7fcd5999b360edaa.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <chrono>
#include <fstream>
#include <getopt.h>
#include <iostream>
#include <string>
#include <cuda_fp16.h>
#include <curand_kernel.h>
#include <cublas_v2.h>
#include <cub/cub.cuh>
#include "cudamacro.h"
#define LATTICE_SUP_N (256)
#define LATTICE_SUB_N (LATTICE_SUP_N / 2)
#define TCRIT 2.26918531421f
#define THREADS (LATTICE_SUB_N)
#define SUP_OFFSET(i,j,nbx) (((j)*(long long)(nbx) + (i))*LATTICE_SUP_N*LATTICE_SUP_N)
#define SUB_OFFSET(i,j) (((j)*LATTICE_SUP_N + (i)*LATTICE_SUB_N)*LATTICE_SUB_N)
#define SUB_ELEM(i,j) ((j)*LATTICE_SUB_N + (i))
#define CUB_CHUNK_SIZE ((1ll<<31) - (1ll<<28))
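// Illustrative layout note (added commentary, not from the original source):
// the lattice uses a block-of-arrays layout. Each 256x256 super-block holds
// four contiguous 128x128 sub-block tiles of 128*128 = 16384 half values; with
// the macros above, SUB_OFFSET(1,0) = 16384, SUB_OFFSET(0,1) = 32768 and
// SUB_OFFSET(1,1) = 49152, while super-block (i,j) starts at element
// (j*nbx + i) * 256 * 256, e.g. SUP_OFFSET(1,0,2) = 65536 for nbx = 2.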
__global__ void set_k(__half* k, __half* kT) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int i = tid % LATTICE_SUB_N;
const int j = tid / LATTICE_SUB_N;
if (j >= LATTICE_SUB_N) return;
__half val = __float2half(0.0f);
if (i == j || i + 1 == j) {
val = __float2half(1.0f);
}
k[j*LATTICE_SUB_N + i] = val;
kT[i*LATTICE_SUB_N + j] = val;
}
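// Worked example (added commentary, not from the original source): in the
// column-major view used by cuBLAS, k[j*N + i] = 1 when i == j or i + 1 == j,
// so for N = 4 the K matrix is upper bidiagonal:
//     1 1 0 0
//     0 1 1 0
//     0 0 1 1
//     0 0 0 1
// Hence (L x K)(i,j) = L(i,j) + L(i,j-1) and (K^T x L)(i,j) = L(i,j) + L(i-1,j):
// two GEMMs accumulate the in-block neighbor sums, and the missing j-1 < 0 /
// i-1 < 0 terms are supplied later by add_boundaries.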
__global__ void init_spins(__half* lattice,
const unsigned long long seed,
const int nbx,
const int nby,
const long long offset) {
const long long tid = static_cast<long long>(blockDim.x) * blockIdx.x + threadIdx.x + offset;
const long long nx = nbx * LATTICE_SUP_N;
const long long ny = nby * LATTICE_SUP_N;
if (tid >= nx * ny) return;
curandStatePhilox4_32_10_t state;
curand_init(seed, tid, 0, &state);
float randval = curand_uniform(&state);
__half val = (randval < 0.5f) ? __float2half(-1.0f) : __float2half(1.0f);
lattice[tid] = val;
}
template <int N>
struct __align__(sizeof(__half)*N) halfn {
__half val[N];
};
#define NLOOPS 2
#define SPINSPERTHREAD 8
template<bool is_black>
__global__ void update_spins(__half* lattice,
float inv_temp,
const __half* __restrict__ nn_sums,
const unsigned long long seed,
const unsigned long long iter,
const int nbx,
const int nby,
const long long offset) {
const long long tid = static_cast<long long>(blockDim.x) * blockIdx.x + threadIdx.x + offset;
const int threads_per_subblock = LATTICE_SUB_N * LATTICE_SUB_N / (NLOOPS * SPINSPERTHREAD);
int bi = tid / threads_per_subblock % (2 * nbx);
int bj = tid / (threads_per_subblock * 2 * nbx);
// subblock local thread idx
int tl = tid % threads_per_subblock;
if (bj >= nby) return;
// Offset threads depending on parity and color
if (is_black) {
if (bi % 2) {
bj = 2*bj + 1;
} else {
bj = 2*bj;
}
} else {
if (bi % 2) {
bj = 2*bj;
} else {
bj = 2*bj + 1;
}
}
curandStatePhilox4_32_10_t state;
curand_init(seed, tid, iter, &state);
#pragma unroll
for (int n = 0; n < NLOOPS; n++) {
size_t elem_offset = SUP_OFFSET(bi/2, bj/2, nbx) + SUB_OFFSET(bi%2, bj%2) + (tl + n * threads_per_subblock) * SPINSPERTHREAD;
halfn<SPINSPERTHREAD> lij = *(reinterpret_cast<halfn<SPINSPERTHREAD>*>(lattice + elem_offset));
const halfn<SPINSPERTHREAD> nn = *(reinterpret_cast<const halfn<SPINSPERTHREAD>*>(nn_sums + elem_offset));
#pragma unroll
for (int m = 0; m < SPINSPERTHREAD; m++) {
float randval = curand_uniform(&state);
float accept = exp(-2.0f * inv_temp * __half2float(nn.val[m] * lij.val[m]));
if (randval < accept) {
lij.val[m] = -lij.val[m];
}
}
*reinterpret_cast<halfn<SPINSPERTHREAD>*>(lattice + elem_offset) = lij;
}
}
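// Note (added commentary, not from the original source): the accept test above
// is the Metropolis criterion. With J = 1, flipping a spin s against its
// precomputed neighbor sum h changes the energy by dE = 2*s*h, so the kernel
// flips with probability min(1, exp(-inv_temp * dE)).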
template<bool is_black>
__global__ void add_boundaries(const __half* __restrict__ lattice,
__half* nn_sums,
const int nbx,
const int nby,
const long long offset) {
const long long tid = static_cast<long long>(blockDim.x) * blockIdx.x + threadIdx.x + offset;
// subblock i,j (1 thread block per subblock)
int bi = tid / LATTICE_SUB_N % (2 * nbx);
int bj = tid / (LATTICE_SUB_N * 2 * nbx);
// subblock local i
int il = tid % LATTICE_SUB_N;
if (bj >= nby) return;
// Offset threads depending on parity and color
int jl, jb;
if (is_black) {
if (bi % 2) {
bj = 2*bj + 1;
jl = LATTICE_SUB_N - 1;
jb = 0;
} else {
bj = 2*bj;
jl = 0;
jb = LATTICE_SUB_N - 1;
}
} else {
if (bi % 2) {
bj = 2*bj;
jl = 0;
jb = LATTICE_SUB_N - 1;
} else {
bj = 2*bj + 1;
jl = LATTICE_SUB_N - 1;
jb = 0;
}
}
int bn = 2*nbx;
int bm = 2*nby;
int bin = (bi - 1 >= 0) ? bi - 1 : bn - 1;
int bip = (bi + 1 < bn) ? bi + 1 : 0;
int bjn = (bj - 1 >= 0) ? bj - 1 : bm - 1;
int bjp = (bj + 1 < bm) ? bj + 1 : 0;
// Update LR
size_t boundary_offset;
if (jl == 0) {
boundary_offset = SUP_OFFSET(bi/2, bjn/2, nbx) + SUB_OFFSET(bi%2, bjn%2);
} else {
boundary_offset = SUP_OFFSET(bi/2, bjp/2, nbx) + SUB_OFFSET(bi%2, bjp%2);
}
size_t local_offset = SUP_OFFSET(bi/2, bj/2, nbx) + SUB_OFFSET(bi%2, bj%2);
*(nn_sums + local_offset + SUB_ELEM(il, jl)) += *(lattice + boundary_offset + SUB_ELEM(il, jb));
// Update UD
if (!is_black) {
jl = (jl == 0) ? LATTICE_SUB_N - 1 : 0;
jb = (jb == 0) ? LATTICE_SUB_N - 1 : 0;
}
if (jl == 0) {
boundary_offset = SUP_OFFSET(bin/2, bj/2, nbx) + SUB_OFFSET(bin%2, bj%2);
} else {
boundary_offset = SUP_OFFSET(bip/2, bj/2, nbx) + SUB_OFFSET(bip%2, bj%2);
}
__half bval = *(lattice + boundary_offset + SUB_ELEM(jb, il));
__syncthreads();
*(nn_sums + local_offset + SUB_ELEM(jl, il)) += bval;
}
void sync(int nGPUs) {
// Sync all devices
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(cudaSetDevice(dev));
CHECK_CUDA(cudaDeviceSynchronize());
}
}
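// Illustrative summary (added commentary, not from the original source): one
// sweep in update() processes the black and then the white checkerboard
// sub-lattices. For each color, per-sub-block nearest-neighbor sums are built
// by two batched half-precision GEMMs (e.g. nn(0,0) = lat(0,1) x K +
// K^T x lat(1,0), see the pointer setup in main()), add_boundaries patches the
// periodic wrap-around terms, and update_spins then applies the Metropolis rule.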
void update(__half **Ab0, __half **Bb0, __half **Ab1, __half **Bb1, __half **Cb,
__half **Aw0, __half **Bw0, __half **Aw1, __half **Bw1, __half **Cw,
__half *lattice, float inv_temp, __half *nn_sums, cublasHandle_t *cublas_handles, int iter,
int nbx, int nby, unsigned long long seed, int nGPUs) {
int batchCount = 2 * nbx * nby;
int batchCountPerGPU = batchCount / nGPUs;
__half alpha = __float2half(1.0f);
__half beta0 = __float2half(0.0f);
__half beta1 = __float2half(1.0f);
// Update black
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(cudaSetDevice(dev));
CHECK_CUBLAS(cublasGemmBatchedEx(cublas_handles[dev], CUBLAS_OP_N, CUBLAS_OP_N, LATTICE_SUB_N, LATTICE_SUB_N, LATTICE_SUB_N,
&alpha, (void**) &Ab0[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N,
(void**) &Bb0[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N, &beta0,
(void**) &Cb[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N, batchCountPerGPU,
CUDA_R_16F, CUBLAS_GEMM_ALGO0_TENSOR_OP));
CHECK_CUBLAS(cublasGemmBatchedEx(cublas_handles[dev], CUBLAS_OP_N, CUBLAS_OP_N, LATTICE_SUB_N, LATTICE_SUB_N, LATTICE_SUB_N,
&alpha, (void**) &Ab1[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N,
(void**) &Bb1[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N, &beta1,
(void**) &Cb[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N, batchCountPerGPU,
CUDA_R_16F, CUBLAS_GEMM_ALGO0_TENSOR_OP));
int blocks = (2 * nbx * nby);
int blocksPerGPU = blocks / nGPUs;
add_boundaries<true><<<blocksPerGPU, THREADS>>>(lattice, nn_sums, nbx, nby, dev * ((long long)blocksPerGPU * THREADS));
blocks = (2 * nbx * nby * LATTICE_SUB_N) / (NLOOPS * SPINSPERTHREAD);
blocksPerGPU = blocks / nGPUs;
update_spins<true><<<blocksPerGPU, THREADS>>>(lattice, inv_temp, nn_sums, seed, (2*iter) * (NLOOPS * SPINSPERTHREAD), nbx, nby, dev * ((long long)blocksPerGPU * THREADS));
}
sync(nGPUs);
// Update white
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(cudaSetDevice(dev));
CHECK_CUBLAS(cublasGemmBatchedEx(cublas_handles[dev], CUBLAS_OP_N, CUBLAS_OP_N, LATTICE_SUB_N, LATTICE_SUB_N, LATTICE_SUB_N,
&alpha, (void**) &Aw0[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N,
(void**) &Bw0[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N, &beta0,
(void**) &Cw[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N, batchCountPerGPU,
CUDA_R_16F, CUBLAS_GEMM_ALGO0_TENSOR_OP));
CHECK_CUBLAS(cublasGemmBatchedEx(cublas_handles[dev], CUBLAS_OP_N, CUBLAS_OP_N, LATTICE_SUB_N, LATTICE_SUB_N, LATTICE_SUB_N,
&alpha, (void**) &Aw1[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N,
(void**) &Bw1[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N, &beta1,
(void**) &Cw[dev * batchCountPerGPU], CUDA_R_16F, LATTICE_SUB_N, batchCountPerGPU,
CUDA_R_16F, CUBLAS_GEMM_ALGO0_TENSOR_OP));
int blocks = (2 * nbx * nby);
int blocksPerGPU = blocks / nGPUs;
add_boundaries<false><<<blocksPerGPU, THREADS>>>(lattice, nn_sums, nbx, nby, dev * ((long long)blocksPerGPU * THREADS));
blocks = (2 * nbx * nby * LATTICE_SUB_N) / (NLOOPS * SPINSPERTHREAD);
blocksPerGPU = blocks / nGPUs;
update_spins<false><<<blocksPerGPU, THREADS>>>(lattice, inv_temp, nn_sums, seed, (2*iter + 1) * (NLOOPS * SPINSPERTHREAD), nbx, nby, dev * ((long long)blocksPerGPU * THREADS));
}
sync(nGPUs);
}
void write_lattice(__half *lattice, std::string filename, int nbx, int nby, int nGPUs) {
printf("Writing lattice to %s...\n", filename.c_str());
long long nx = nbx * LATTICE_SUP_N;
long long ny = nby * LATTICE_SUP_N;
__half* lattice_h;
float* lattice_true_h;
lattice_h = (__half*) malloc(nx * ny * sizeof(*lattice_h));
lattice_true_h = (float*) malloc(nx * ny * sizeof(*lattice_true_h));
long long spinsPerGPU = nx * (ny/nGPUs);
// Copy out full lattice to host
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(cudaSetDevice(dev));
CHECK_CUDA(cudaMemcpy(&lattice_h[dev * spinsPerGPU], &lattice[dev * spinsPerGPU], spinsPerGPU * sizeof(*lattice_h), cudaMemcpyDeviceToHost));
}
// Write file
for (int bj = 0; bj < nby; bj++) {
for (int bi = 0; bi < nbx; bi++) {
__half* l00 = lattice_h + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 0);
__half* l01 = lattice_h + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 1);
__half* l10 = lattice_h + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 0);
__half* l11 = lattice_h + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 1);
long long offset = (bj * LATTICE_SUP_N) * nx + (bi * LATTICE_SUP_N);
for(int j = 0; j < LATTICE_SUB_N; j++) {
for(int i = 0; i < LATTICE_SUB_N; i++) {
lattice_true_h[offset + (2*j) * nx + (2*i)] = __half2float(*(l00 + SUB_ELEM(i, j)));
lattice_true_h[offset + (2*j + 1) * nx + (2*i + 1)] = __half2float(*(l11 + SUB_ELEM(i, j)));
lattice_true_h[offset + (2*j) * nx + (2*i + 1)] = __half2float(*(l10 + SUB_ELEM(i, j)));
lattice_true_h[offset + (2*j + 1) * nx + (2*i)] = __half2float(*(l01 + SUB_ELEM(i, j)));
}
}
}
}
std::ofstream f;
f.open(filename);
if (f.is_open()) {
for (long long j = 0; j < ny; j++) {
for (long long i = 0; i < nx; i++) {
f << lattice_true_h[j * nx + i] << " ";
}
f << std::endl;
}
}
f.close();
free(lattice_h);
free(lattice_true_h);
}
static void usage(const char *pname) {
const char *bname = rindex(pname, '/');
if (!bname) {bname = pname;}
else {bname++;}
fprintf(stdout,
"Usage: %s [options]\n"
"options:\n"
"\t-x|--lattice-nbx <LATTICE_NBX>\n"
"\t\tnumber of blocks along lattice rows (number of rows / 256)\n"
"\n"
"\t-y|--lattice-nby <LATTICE_NBY>\n"
"\t\tnumber of blocks along lattice columns (number of columns / 256)\n"
"\n"
"\t-g|--ngpus <NGPUS>\n"
"\t\tnumber of GPUs to use for simulation\n"
"\n"
"\t-w|--nwarmup <NWARMUP>\n"
"\t\tnumber of warmup iterations\n"
"\n"
"\t-n|--niters <NITERS>\n"
"\t\tnumber of trial iterations\n"
"\n"
"\t-a|--alpha <ALPHA>\n"
"\t\tcoefficient of critical temperature\n"
"\n"
"\t-s|--seed <SEED>\n"
"\t\tseed for random number generation\n"
"\n"
"\t-o|--write-lattice\n"
"\t\twrite final lattice configuration to file\n\n",
bname);
exit(EXIT_SUCCESS);
}
int main(int argc, char **argv) {
// Defaults
int nbx = 10; // Lattice rows dimension (in number of super blocks)
int nby = 10; // Lattice columns dimension (in number of super blocks)
float alpha = 0.1f; // coefficient of critical temperature
int niter = 1000;
int nwarmup = 100;
bool write = false;
int nGPUs = 1;
unsigned long long seed = 1234ULL;
while (1) {
static struct option long_options[] = {
{ "lattice-nbx", required_argument, 0, 'x'},
{ "lattice-nby", required_argument, 0, 'y'},
{ "ngpus", required_argument, 0, 'g'},
{ "seed", required_argument, 0, 's'},
{ "nwarmup", required_argument, 0, 'w'},
{ "niter", required_argument, 0, 'n'},
{ "write-lattice", no_argument, 0, 'o'},
{ "help", no_argument, 0, 'h'},
{ 0, 0, 0, 0}
};
int option_index = 0;
int ch = getopt_long(argc, argv, "x:y:g:a:s:w:n:oh", long_options, &option_index);
if (ch == -1) break;
switch(ch) {
case 0:
break;
case 'x':
nbx = atoi(optarg); break;
case 'y':
nby = atoi(optarg); break;
case 'g':
nGPUs = atoi(optarg); break;
case 'a':
alpha = atof(optarg); break;
case 's':
seed = atoll(optarg); break;
case 'w':
nwarmup = atoi(optarg); break;
case 'n':
niter = atoi(optarg); break;
case 'o':
write = true; break;
case 'h':
usage(argv[0]); break;
case '?':
exit(EXIT_FAILURE);
default:
fprintf(stderr, "unknown option: %c\n", ch);
exit(EXIT_FAILURE);
}
}
if (nby % nGPUs != 0) {
fprintf(stderr, "ERROR: Number of super blocks in y dimension must be multiple of number of gpus.\n");
exit(EXIT_FAILURE);
}
long long nx = nbx * LATTICE_SUP_N;
long long ny = nby * LATTICE_SUP_N;
__half* lattice;
__half* nn_sums;
__half* k;
__half* kT;
CHECK_CUDA(cudaMallocManaged(&lattice, nx * ny * sizeof(*lattice)));
CHECK_CUDA(cudaMallocManaged(&nn_sums, nx * ny * sizeof(*nn_sums)));
CHECK_CUDA(cudaMallocManaged(&k, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*k)));
CHECK_CUDA(cudaMallocManaged(&kT, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*kT)));
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(cudaMemAdvise(k, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*k), cudaMemAdviseSetReadMostly, dev));
CHECK_CUDA(cudaMemAdvise(k, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*k), cudaMemAdviseSetAccessedBy, dev));
CHECK_CUDA(cudaMemAdvise(kT, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*kT), cudaMemAdviseSetReadMostly, dev));
CHECK_CUDA(cudaMemAdvise(kT, LATTICE_SUB_N * LATTICE_SUB_N * sizeof(*kT), cudaMemAdviseSetAccessedBy, dev));
}
long long spinsPerGPU = nx * ny / nGPUs;
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(cudaMemAdvise(&lattice[dev * spinsPerGPU], spinsPerGPU * sizeof(*lattice), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&nn_sums[dev * spinsPerGPU], spinsPerGPU * sizeof(*nn_sums), cudaMemAdviseSetPreferredLocation, dev));
}
cublasHandle_t* cublas_handles;
cublas_handles = (cublasHandle_t*) malloc(nGPUs * sizeof(cublasHandle_t));
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(cudaSetDevice(dev));
CHECK_CUBLAS(cublasCreate(&cublas_handles[dev]));
CHECK_CUBLAS(cublasSetMathMode(cublas_handles[dev], CUBLAS_TENSOR_OP_MATH));
}
// Setup k and k transpose matrices
CHECK_CUDA(cudaSetDevice(0));
int blocks = (LATTICE_SUB_N * LATTICE_SUB_N + THREADS - 1) / THREADS;
set_k<<<blocks, THREADS>>>(k, kT);
// Initialize lattice spins randomly
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(cudaSetDevice(dev));
blocks = (nx * ny + THREADS - 1) / THREADS;
int blocksPerGPU = blocks/nGPUs;
init_spins<<<blocksPerGPU, THREADS>>>(lattice, seed, nbx, nby, dev * nx * (ny/nGPUs));
}
sync(nGPUs);
// Setup pointers for batched GEMMS
__half **Ab0, **Bb0;
__half **Ab1, **Bb1;
__half **Aw0, **Bw0;
__half **Aw1, **Bw1;
__half **Cb, **Cw;
int batchCount = 2 * (nbx * nby);
int batchCountPerGPU = batchCount / nGPUs;
CHECK_CUDA(cudaMallocManaged(&Ab0, batchCount * sizeof(*Ab0)));
CHECK_CUDA(cudaMallocManaged(&Bb0, batchCount * sizeof(*Bb0)));
CHECK_CUDA(cudaMallocManaged(&Ab1, batchCount * sizeof(*Ab1)));
CHECK_CUDA(cudaMallocManaged(&Bb1, batchCount * sizeof(*Bb1)));
CHECK_CUDA(cudaMallocManaged(&Aw0, batchCount * sizeof(*Aw0)));
CHECK_CUDA(cudaMallocManaged(&Bw0, batchCount * sizeof(*Bw0)));
CHECK_CUDA(cudaMallocManaged(&Aw1, batchCount * sizeof(*Aw1)));
CHECK_CUDA(cudaMallocManaged(&Bw1, batchCount * sizeof(*Bw1)));
CHECK_CUDA(cudaMallocManaged(&Cb, batchCount * sizeof(*Cb)));
CHECK_CUDA(cudaMallocManaged(&Cw, batchCount * sizeof(*Cw)));
for (int dev = 0; dev < nGPUs; dev++) {
CHECK_CUDA(cudaMemAdvise(&Ab0[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Ab0), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&Bb0[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Bb0), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&Ab1[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Ab1), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&Bb1[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Bb1), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&Aw0[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Aw0), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&Bw0[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Bw0), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&Aw1[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Aw1), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&Bw1[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Bw1), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&Cb[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Cb), cudaMemAdviseSetPreferredLocation, dev));
CHECK_CUDA(cudaMemAdvise(&Cw[dev * batchCountPerGPU], batchCountPerGPU * sizeof(*Cw), cudaMemAdviseSetPreferredLocation, dev));
}
int idx = 0;
for (int bj = 0; bj < nby; bj++) {
for (int bi = 0; bi < nbx; bi++) {
__half* nn_sums00 = nn_sums + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 0);
__half* nn_sums11 = nn_sums + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 1);
__half* nn_sums01 = nn_sums + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 1);
__half* nn_sums10 = nn_sums + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 0);
__half* lat00 = lattice + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 0);
__half* lat11 = lattice + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 1);
__half* lat01 = lattice + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(0, 1);
__half* lat10 = lattice + SUP_OFFSET(bi, bj, nbx) + SUB_OFFSET(1, 0);
// Black:
//nn_sum(0,0) = lattice(0,1) x K + K^T x lattice(1,0)
//nn_sum(1,1) = lattice(1,0) x K^T + K x lattice(0,1)
Ab0[idx ] = lat01; Bb0[idx ] = k;
Ab0[idx+1] = lat10; Bb0[idx+1] = kT;
Ab1[idx ] = kT; Bb1[idx ] = lat10;
Ab1[idx+1] = k; Bb1[idx+1] = lat01;
Cb[idx ] = nn_sums00;
Cb[idx+1] = nn_sums11;
// White:
//nn_sum(1,0) = lattice(1,1) x K + K x lattice(0,0)
//nn_sum(0,1) = lattice(0,0) x K^T + K^T x lattice(1,1)
Aw0[idx ] = lat00; Bw0[idx ] = kT;
Aw0[idx+1] = lat11; Bw0[idx+1] = k;
Aw1[idx ] = kT; Bw1[idx ] = lat11;
Aw1[idx+1] = k; Bw1[idx+1] = lat00;
Cw[idx ] = nn_sums01;
Cw[idx+1] = nn_sums10;
idx += 2;
}
}
sync(nGPUs);
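// Note (added commentary, not from the original source): TCRIT is the exact
// critical temperature of the 2D Ising model, T_c = 2 / ln(1 + sqrt(2))
// ~= 2.26918531421, so the next line sets beta = 1 / (alpha * T_c) with
// J = k_B = 1.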
float inv_temp = 1.0f / (alpha*TCRIT);
// Warmup
printf("Starting warmup...\n");
for (int n = 0; n < nwarmup; n++) {
update(Ab0, Bb0, Ab1, Bb1, Cb, Aw0, Bw0, Aw1, Bw1, Cw,
lattice, inv_temp, nn_sums, cublas_handles, n+1, nbx, nby, seed, nGPUs);
}
sync(nGPUs);
printf("Starting trial iterations...\n");
auto t0 = std::chrono::high_resolution_clock::now();
for (int n = nwarmup; n < niter + nwarmup; n++) {
update(Ab0, Bb0, Ab1, Bb1, Cb, Aw0, Bw0, Aw1, Bw1, Cw,
lattice, inv_temp, nn_sums, cublas_handles, n+1, nbx, nby, seed, nGPUs);
if ((n - nwarmup) % 1000 == 0) printf("Completed %d/%d iterations...\n", n - nwarmup + 1, niter);
}
sync(nGPUs);
auto t1 = std::chrono::high_resolution_clock::now();
double duration = (double) std::chrono::duration_cast<std::chrono::microseconds>(t1-t0).count();
printf("REPORT:\n");
printf("\tnGPUs: %d\n", nGPUs);
printf("\ttemperature: %f * %f\n", alpha, TCRIT);
printf("\tseed: %llu\n", seed);
printf("\twarmup iterations: %d\n", nwarmup);
printf("\ttrial iterations: %d\n", niter);
printf("\tlattice dimensions: %lld x %lld\n", nx, ny);
printf("\telapsed time: %f sec\n", duration * 1e-6);
printf("\tupdates per ns: %f\n", (double) (nx * ny) * niter / duration * 1e-3);
// Compute average magnetism
double* devsums;
int nchunks = (spinsPerGPU + CUB_CHUNK_SIZE - 1)/ CUB_CHUNK_SIZE;
CHECK_CUDA(cudaMallocManaged(&devsums, nGPUs * nchunks * sizeof(*devsums)));
for (int dev = 0 ; dev < nGPUs; dev++) {
CHECK_CUDA(cudaSetDevice(dev));
size_t cub_workspace_bytes = 0;
void* workspace = NULL;
CHECK_CUDA(cub::DeviceReduce::Sum(workspace, cub_workspace_bytes, &lattice[dev * spinsPerGPU], &devsums[dev*nchunks], CUB_CHUNK_SIZE));
CHECK_CUDA(cudaMalloc(&workspace, cub_workspace_bytes));
for (int n = 0; n < nchunks; n++) {
CHECK_CUDA(cub::DeviceReduce::Sum(workspace, cub_workspace_bytes, &lattice[dev * spinsPerGPU + n*CUB_CHUNK_SIZE],
&devsums[dev * nchunks + n], std::min((long long) CUB_CHUNK_SIZE, spinsPerGPU - n * CUB_CHUNK_SIZE)));
}
CHECK_CUDA(cudaFree(workspace));
}
sync(nGPUs);
double hostsum = 0;
for (int n = 0; n < nGPUs * nchunks; n++) {
hostsum += devsums[n];
}
std::cout << "\taverage magnetism (absolute): " << abs(hostsum / (nx * ny)) << std::endl;
CHECK_CUDA(cudaFree(devsums));
if (write) write_lattice(lattice, "final.txt", nbx, nby, nGPUs);
return 0;
}
|
05c056f7fc8b785d283a469d6ab6a632dbe38457.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <util/dimage.h>
#include <util/timer.h>
#include <util/image_ops.h>
#include "pyramid.h"
__global__ void conv_to_block_of_arrays(float2 *v,
rod::dimage_ptr<const float2> in,
KernPyramidLevel lvl,
float2 m)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
if(!in.is_inside(pos))
return;
in += in.offset_at(pos);
v += mem_index(lvl, pos);
*v = *in * m;
}
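// Illustrative summary (added commentary, not from the original source):
// upsample() converts the coarse level's block-of-arrays vector field into a
// plain image, bilinearly upsamples it to the fine resolution, and the kernel
// above writes it back into block-of-arrays order while scaling each vector by
// (dest.width/orig.width, dest.height/orig.height) so displacements are
// expressed in fine-level pixel units.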
void upsample(PyramidLevel &dest, PyramidLevel &orig)
{
rod::base_timer &timer
= rod::timers.gpu_add("upsample",dest.width*dest.height,"P");
rod::dimage<float2> vec_orig;
internal_vector_to_image(vec_orig, orig.v, orig,
make_float2(1,1));
rod::dimage<float2> vec_dest(dest.width, dest.height);
rod::upsample(&vec_dest, &vec_orig, rod::INTERP_LINEAR);
dest.v.fill(0);
dim3 bdim(32,8),
gdim((dest.width+bdim.x-1)/bdim.x,
(dest.height+bdim.y-1)/bdim.y);
hipLaunchKernelGGL(( conv_to_block_of_arrays), dim3(gdim), dim3(bdim), 0, 0, &dest.v, &vec_dest, dest,
make_float2((float)dest.width/orig.width,
(float)dest.height/orig.height));
timer.stop();
}
| 05c056f7fc8b785d283a469d6ab6a632dbe38457.cu | #include <cuda.h>
#include <util/dimage.h>
#include <util/timer.h>
#include <util/image_ops.h>
#include "pyramid.h"
__global__ void conv_to_block_of_arrays(float2 *v,
rod::dimage_ptr<const float2> in,
KernPyramidLevel lvl,
float2 m)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
if(!in.is_inside(pos))
return;
in += in.offset_at(pos);
v += mem_index(lvl, pos);
*v = *in * m;
}
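// Illustrative summary (added commentary, not from the original source):
// upsample() converts the coarse level's block-of-arrays vector field into a
// plain image, bilinearly upsamples it to the fine resolution, and the kernel
// above writes it back into block-of-arrays order while scaling each vector by
// (dest.width/orig.width, dest.height/orig.height) so displacements are
// expressed in fine-level pixel units.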
void upsample(PyramidLevel &dest, PyramidLevel &orig)
{
rod::base_timer &timer
= rod::timers.gpu_add("upsample",dest.width*dest.height,"P");
rod::dimage<float2> vec_orig;
internal_vector_to_image(vec_orig, orig.v, orig,
make_float2(1,1));
rod::dimage<float2> vec_dest(dest.width, dest.height);
rod::upsample(&vec_dest, &vec_orig, rod::INTERP_LINEAR);
dest.v.fill(0);
dim3 bdim(32,8),
gdim((dest.width+bdim.x-1)/bdim.x,
(dest.height+bdim.y-1)/bdim.y);
conv_to_block_of_arrays<<<gdim, bdim>>>(&dest.v, &vec_dest, dest,
make_float2((float)dest.width/orig.width,
(float)dest.height/orig.height));
timer.stop();
}
|
fe58739ab0147fcbd0c4076eca189253bbd1bd6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "RunLengthEncodeGPU.h"
#include "TempSpaceBroker.h"
#include "common.h"
#include "CascadedCommon.h"
#include "nvcomp.hpp"
#include "type_macros.h"
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
#include <hipcub/hipcub.hpp>
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include <cassert>
#include <stdexcept>
#include <string>
namespace nvcomp
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const size_t ALIGN_OFFSET = 256;
constexpr const int WARP_SIZE = 32;
constexpr const int GLOBAL_TILE_SIZE = 1024;
} // namespace
/******************************************************************************
* KERNELS ********************************************************************
*****************************************************************************/
namespace
{
template <typename T, int NUM_THREADS>
__device__ T warpSum(T const initVal)
{
constexpr const uint32_t mask
= NUM_THREADS < WARP_SIZE ? (1u << NUM_THREADS) - 1 : 0xffffffff;
T val = initVal;
for (int d = NUM_THREADS / 2; d > 0; d /= 2) {
val += __shfl_down_sync(mask, val, d, NUM_THREADS);
}
return val;
}
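// Worked example (added commentary, not from the original source): for
// NUM_THREADS = 32 the shuffle loop folds the warp in halves, d = 16, 8, 4,
// 2, 1. If every lane starts with 1, lane 0 accumulates 2, 4, 8, 16 and
// finally 32, so only lane 0 holds the complete sum -- which is why
// cooperativeSum below reads the per-warp results from lane 0 of each warp.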
template <typename T, int BLOCK_SIZE>
__device__ T cooperativeSum(T const initVal, T* const buffer)
{
// first all warps reduce to single value
assert(BLOCK_SIZE % WARP_SIZE == 0);
assert(BLOCK_SIZE <= WARP_SIZE * WARP_SIZE);
T val = warpSum<T, WARP_SIZE>(initVal);
if (threadIdx.x % WARP_SIZE == 0) {
buffer[threadIdx.x / WARP_SIZE] = val;
}
__syncthreads();
if (threadIdx.x < (BLOCK_SIZE / WARP_SIZE)) {
val = warpSum<T, BLOCK_SIZE / WARP_SIZE>(buffer[threadIdx.x]);
}
return val;
}
/**
* @brief This kernel produces the block sizes for a prefixsum in a subsequent
* kernel.
*
* @tparam VALUE The value type.
* @tparam RUN The run count type.
* @param in The input data.
* @param num The size of the input data.
* @param blockSize The location to write the block sizes (output).
*/
template <typename VALUE, typename RUN, int BLOCK_SIZE, int TILE_SIZE>
__global__ void rleInitKernel(
const VALUE* const in,
const size_t* const numInDevice,
RUN* const blockSize)
{
constexpr const int ITEMS_PER_THREAD = TILE_SIZE / BLOCK_SIZE;
// the algorithm here is to keep reducing "chunks" to a start and end marker
const int num = static_cast<int>(*numInDevice);
if (blockIdx.x * TILE_SIZE < num) {
// we load the preceding value in the first spot
__shared__ VALUE valBuffer[TILE_SIZE + 1];
__shared__ RUN buffer[BLOCK_SIZE / WARP_SIZE];
if (threadIdx.x == 0) {
valBuffer[0]
= blockIdx.x > 0 ? in[blockIdx.x * TILE_SIZE - 1] : (in[0] + 1);
}
for (int tid = threadIdx.x; tid < TILE_SIZE; tid += BLOCK_SIZE) {
const int gTid = tid + blockIdx.x * TILE_SIZE;
// cooperatively populate valBuffer and runBuffer
if (gTid < num) {
valBuffer[tid + 1] = in[gTid];
} else {
valBuffer[tid + 1] = in[num - 1];
}
}
__syncthreads();
// build bit mask
VALUE val = valBuffer[threadIdx.x * ITEMS_PER_THREAD];
RUN sum = 0;
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
const int tid = threadIdx.x * ITEMS_PER_THREAD + i;
const VALUE nextVal = valBuffer[tid + 1];
sum += nextVal != val;
val = nextVal;
}
sum = cooperativeSum<RUN, BLOCK_SIZE>(sum, buffer);
if (threadIdx.x == 0) {
blockSize[blockIdx.x] = sum;
}
} else if (threadIdx.x == 0) {
blockSize[blockIdx.x] = 0;
}
if (blockIdx.x == gridDim.x - 1 && threadIdx.x == 0) {
blockSize[gridDim.x] = 0;
}
}
template <typename VALUE, typename RUN, int BLOCK_SIZE, int TILE_SIZE>
__global__ void rleReduceKernel(
const VALUE* const in,
const size_t* const numInDevice,
const RUN* const blockPrefix,
RUN* const blockStart,
VALUE** const valsPtr,
RUN** const runsPtr,
size_t* const numOutDevice)
{
constexpr const int ITEMS_PER_THREAD = TILE_SIZE / BLOCK_SIZE;
// the algorithm here is to keep reducing "chunks" to a start and end marker
const int num = static_cast<int>(*numInDevice);
if (blockIdx.x * TILE_SIZE < num) {
VALUE* const vals = *valsPtr;
RUN* const runs = *runsPtr;
// we load the preceding value in the first spot
__shared__ VALUE valBuffer[TILE_SIZE + 1];
// we store the sum in the last spot
__shared__ RUN prefix[BLOCK_SIZE + 1];
if (threadIdx.x == 0) {
valBuffer[0]
= blockIdx.x > 0 ? in[blockIdx.x * TILE_SIZE - 1] : (in[0] + 1);
}
for (int tid = threadIdx.x; tid < TILE_SIZE; tid += BLOCK_SIZE) {
const int gTid = tid + blockIdx.x * TILE_SIZE;
// cooperatively populate valBuffer and runBuffer
if (gTid < num) {
valBuffer[tid + 1] = in[gTid];
} else {
valBuffer[tid + 1] = in[num - 1];
}
}
__syncthreads();
// build bit mask
RUN sum = 0;
{
VALUE val = valBuffer[threadIdx.x * ITEMS_PER_THREAD];
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
const int tid = threadIdx.x * ITEMS_PER_THREAD + i;
const VALUE nextVal = valBuffer[tid + 1];
sum += nextVal != val;
val = nextVal;
}
}
__syncthreads();
// prefixsum bit mask
{
typedef hipcub::BlockScan<RUN, BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).ExclusiveSum(sum, sum);
prefix[threadIdx.x] = sum;
if (threadIdx.x == 0) {
prefix[BLOCK_SIZE]
= blockPrefix[blockIdx.x + 1] - blockPrefix[blockIdx.x];
}
}
__syncthreads();
__shared__ RUN runBuffer[TILE_SIZE + 1];
// do local run length encoding with an undifferentiated run count
{
int outIdx = prefix[threadIdx.x];
VALUE val = valBuffer[threadIdx.x * ITEMS_PER_THREAD];
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
const int tid = threadIdx.x * ITEMS_PER_THREAD + i;
const VALUE nextVal = valBuffer[tid + 1];
if (nextVal != val) {
runBuffer[outIdx] = tid;
val = nextVal;
++outIdx;
}
}
}
const RUN numCompacted = prefix[BLOCK_SIZE];
if (threadIdx.x == 0) {
runBuffer[numCompacted] = ((blockIdx.x + 1) * TILE_SIZE >= num)
? ((num - 1) % TILE_SIZE) + 1
: TILE_SIZE;
}
__syncthreads();
// write back to global memory
const RUN offset = blockPrefix[blockIdx.x];
for (int tid = threadIdx.x; tid < numCompacted; tid += BLOCK_SIZE) {
// runs still need to be differentiated -- the last one will need to the
// number of values
vals[offset + tid] = valBuffer[runBuffer[tid] + 1];
assert(runBuffer[tid + 1] >= runBuffer[tid]);
runs[offset + tid] = runBuffer[tid + 1] - runBuffer[tid];
}
if (threadIdx.x == 0) {
blockStart[blockIdx.x] = runBuffer[0] + blockIdx.x * TILE_SIZE;
}
}
if (blockIdx.x == gridDim.x - 1 && threadIdx.x == BLOCK_SIZE - 1) {
*numOutDevice = blockPrefix[gridDim.x];
}
}
/**
* @brief Fix block-join gaps, that is, cases where the run count recorded for
* a value fails to account for its duplicates in the following block(s). This
* requires that the first run in each block's output not be differentiated.
*
* @param runsPtr Pointer to the almost-finished runs.
* @param blockStart The global position of the first run boundary in each block.
* @param blockPrefix The previously calculated block prefix.
* @param numInDevice The number of input entries (on the device).
*/
template <typename RUN, int BLOCK_SIZE, int TILE_SIZE>
__global__ void rleFinalizeKernel(
RUN** const runsPtr,
const RUN* const blockStart,
const RUN* const blockPrefix,
const size_t* const numInDevice)
{
const int num = roundUpDiv(static_cast<int>(*numInDevice), TILE_SIZE);
if (blockIdx.x * BLOCK_SIZE < num) {
RUN* const runs = *runsPtr;
// we load the blocks runs plus 1 extra
__shared__ RUN prefixBuffer[BLOCK_SIZE + 1];
int tid = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const RUN bp = tid < num ? blockPrefix[tid] : blockPrefix[num];
prefixBuffer[threadIdx.x] = bp;
if (threadIdx.x == 0) {
prefixBuffer[BLOCK_SIZE] = blockPrefix[(blockIdx.x + 1) * BLOCK_SIZE];
}
__syncthreads();
if (tid < num) {
if (bp > 0 && (tid + 1 == num || bp < prefixBuffer[threadIdx.x + 1])) {
// binary search for the first block whose prefix equals bp
int low = 0;
int high = tid;
while (high > low) {
const int mid = (low + high) / 2;
if (blockPrefix[mid] == bp) {
// keep searching down
high = mid;
} else {
// start searching up
low = mid + 1;
}
}
// we need to fix the count for this block
runs[bp - 1] += blockStart[tid] - low * TILE_SIZE;
}
}
}
}
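// Worked example (added commentary, not from the original source): suppose a
// run starts inside tile 1 and ends inside tile 4. Tiles 2 and 3 complete no
// runs, so blockPrefix[2] == blockPrefix[3] == blockPrefix[4] == bp and the
// reduce kernel stored only the tile-1 portion of the run at runs[bp - 1].
// The thread for tid = 4 binary-searches down to low = 2 and extends
// runs[bp - 1] by blockStart[4] - 2 * TILE_SIZE, the number of elements the
// run covers from the start of tile 2 up to its end inside tile 4.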
} // namespace
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
template <typename T>
size_t downstreamWorkspaceSize(const size_t num)
{
return sizeof(T) * ::max(1024ULL, 3ULL * roundUpDiv(num, GLOBAL_TILE_SIZE))
+ sizeof(int);
}
template <typename T, typename U>
size_t requiredWorkspaceSizeTyped(const size_t num)
{
// TODO: this assumes a large datatype
T* inPtr = nullptr;
T* valsPtr = nullptr;
U* runsPtr = nullptr;
size_t* numPtr = nullptr;
size_t workspaceSize = 0;
hipError_t err = hipcub::DeviceRunLengthEncode::Encode(
nullptr,
workspaceSize,
inPtr,
valsPtr,
runsPtr,
numPtr,
static_cast<int>(num),
0);
if (err != hipSuccess) {
throw std::runtime_error(
"Failed to get workspace size: " + std::to_string(err));
}
workspaceSize = ::max(workspaceSize, downstreamWorkspaceSize<U>(num));
return ALIGN_OFFSET + workspaceSize;
}
template <typename VALUE, typename COUNT>
void compressInternal(
void* const workspace,
const size_t workspaceSize,
void* const outValues,
void* const outCounts,
size_t* numOutDevice,
void const* const in,
size_t const num,
hipStream_t stream)
{
VALUE* const outValuesTyped = static_cast<VALUE*>(outValues);
COUNT* const outCountsTyped = static_cast<COUNT*>(outCounts);
const VALUE* const inTyped = static_cast<const VALUE*>(in);
const size_t reqWorkspaceSize = RunLengthEncodeGPU::requiredWorkspaceSize(
num, getnvcompType<VALUE>(), getnvcompType<COUNT>());
if (workspaceSize < reqWorkspaceSize) {
throw std::runtime_error(
"Invalid workspace size: " + std::to_string(workspaceSize)
+ ", need at least " + std::to_string(reqWorkspaceSize));
}
void* const alignedWorkspace = align(workspace, ALIGN_OFFSET);
size_t alignedWorkspaceSize
= workspaceSize - relativeEndOffset(workspace, alignedWorkspace);
hipError_t err = hipcub::DeviceRunLengthEncode::Encode(
alignedWorkspace,
alignedWorkspaceSize,
inTyped,
outValuesTyped,
outCountsTyped,
numOutDevice,
static_cast<int>(num),
stream);
if (err != hipSuccess) {
throw std::runtime_error(
"Failed to schedule hipcub::DeviceRunLengthEncode::Encode() kernel: "
+ std::to_string(err));
}
}
template <typename VALUE, typename COUNT>
void compressDownstreamInternal(
void* const workspace,
const size_t workspaceSize,
void** const outValuesPtr,
void** const outCountsPtr,
size_t* numOutDevice,
void const* const in,
size_t const* numInDevice,
const size_t maxNum,
hipStream_t stream)
{
VALUE** const outValuesTypedPtr = reinterpret_cast<VALUE**>(outValuesPtr);
COUNT** const outCountsTypedPtr = reinterpret_cast<COUNT**>(outCountsPtr);
const VALUE* const inTyped = static_cast<const VALUE*>(in);
const size_t reqWorkspaceSize = downstreamWorkspaceSize<COUNT>(maxNum);
if (workspaceSize < reqWorkspaceSize) {
throw std::runtime_error(
"Invalid workspace size: " + std::to_string(workspaceSize)
+ ", need at least " + std::to_string(reqWorkspaceSize));
}
constexpr const int BLOCK_SIZE = 128;
const dim3 grid(roundUpDiv(maxNum, GLOBAL_TILE_SIZE));
const dim3 block(BLOCK_SIZE);
TempSpaceBroker tempSpace(workspace, workspaceSize);
COUNT* blockSizes;
COUNT* blockPrefix;
COUNT* blockStart;
tempSpace.reserve(&blockSizes, grid.x);
tempSpace.reserve(&blockPrefix, grid.x + 1);
tempSpace.reserve(&blockStart, grid.x);
void* const scanWorkspace = tempSpace.next();
// TODO: expand such that the mask calculation is done across the entire
// array, then the prefix sum, and then the reduction
// get blocks sizes
hipLaunchKernelGGL(( rleInitKernel<VALUE, COUNT, BLOCK_SIZE, GLOBAL_TILE_SIZE>)
, dim3(grid), dim3(block), 0, stream, inTyped, numInDevice, blockSizes);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
throw std::runtime_error(
"Failed to launch rleInitKernel: " + std::to_string(err));
}
// get output locations
size_t requiredSpace;
err = hipcub::DeviceScan::ExclusiveSum(
nullptr, requiredSpace, blockSizes, blockPrefix, grid.x + 1, stream);
if (err != hipSuccess) {
throw std::runtime_error(
"Failed to query rleScanKernel: " + std::to_string(err));
}
size_t scanWorkspaceSize
= ::max(1024 * sizeof(COUNT), maxNum * sizeof(COUNT));
if (requiredSpace > scanWorkspaceSize) {
throw std::runtime_error(
"Too little workspace: " + std::to_string(scanWorkspaceSize) + ", need "
+ std::to_string(requiredSpace));
}
err = hipcub::DeviceScan::ExclusiveSum(
scanWorkspace,
scanWorkspaceSize,
blockSizes,
blockPrefix,
grid.x + 1,
stream);
if (err != hipSuccess) {
throw std::runtime_error(
"Failed to launch rleScanKernel: " + std::to_string(err)
+ ", with "
"grid.x = "
+ std::to_string(grid.x + 1) + " items.");
}
// do actual compaction
hipLaunchKernelGGL(( rleReduceKernel<VALUE, COUNT, BLOCK_SIZE, GLOBAL_TILE_SIZE>)
, dim3(grid), dim3(block), 0, stream,
inTyped,
numInDevice,
blockPrefix,
blockStart,
outValuesTypedPtr,
outCountsTypedPtr,
numOutDevice);
err = hipGetLastError();
if (err != hipSuccess) {
throw std::runtime_error(
"Failed to launch rleReduceKernel: " + std::to_string(err));
}
// fix gaps
hipLaunchKernelGGL(( rleFinalizeKernel<COUNT, BLOCK_SIZE, GLOBAL_TILE_SIZE>)
, dim3(dim3(roundUpDiv(grid.x, block.x))), dim3(block), 0, stream,
outCountsTypedPtr, blockStart, blockPrefix, numInDevice);
err = hipGetLastError();
if (err != hipSuccess) {
throw std::runtime_error(
"Failed to launch rleFinalizeKernel: " + std::to_string(err));
}
}
} // namespace
/******************************************************************************
* PUBLIC STATIC FUNCTIONS ****************************************************
*****************************************************************************/
void RunLengthEncodeGPU::compress(
void* workspace,
size_t workspaceSize,
nvcompType_t valueType,
void* const outValues,
nvcompType_t countType,
void* const outCounts,
size_t* const numOutDevice,
const void* const in,
const size_t num,
hipStream_t stream)
{
NVCOMP_TYPE_TWO_SWITCH(
valueType,
countType,
compressInternal,
workspace,
workspaceSize,
outValues,
outCounts,
numOutDevice,
in,
num,
stream);
}
void RunLengthEncodeGPU::compressDownstream(
void* workspace,
size_t workspaceSize,
nvcompType_t valueType,
void** const outValuesPtr,
nvcompType_t countType,
void** const outCountsPtr,
size_t* const numOutDevice,
const void* const in,
const size_t* numInDevice,
const size_t maxNum,
hipStream_t stream)
{
NVCOMP_TYPE_TWO_SWITCH(
valueType,
countType,
compressDownstreamInternal,
workspace,
workspaceSize,
outValuesPtr,
outCountsPtr,
numOutDevice,
in,
numInDevice,
maxNum,
stream);
}
size_t RunLengthEncodeGPU::requiredWorkspaceSize(
const size_t num, const nvcompType_t valueType, const nvcompType_t runType)
{
NVCOMP_TYPE_TWO_SWITCH_RETURN(
valueType, runType, requiredWorkspaceSizeTyped, num);
}
} // namespace nvcomp
| fe58739ab0147fcbd0c4076eca189253bbd1bd6f.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "RunLengthEncodeGPU.h"
#include "TempSpaceBroker.h"
#include "common.h"
#include "CascadedCommon.h"
#include "nvcomp.hpp"
#include "type_macros.h"
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
#include <cub/device/device_run_length_encode.cuh>
#include <cub/device/device_scan.cuh>
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include <cassert>
#include <stdexcept>
#include <string>
namespace nvcomp
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const size_t ALIGN_OFFSET = 256;
constexpr const int WARP_SIZE = 32;
constexpr const int GLOBAL_TILE_SIZE = 1024;
} // namespace
/******************************************************************************
* KENRELS ********************************************************************
*****************************************************************************/
namespace
{
template <typename T, int NUM_THREADS>
__device__ T warpSum(T const initVal)
{
constexpr const uint32_t mask
= NUM_THREADS < WARP_SIZE ? (1u << NUM_THREADS) - 1 : 0xffffffff;
T val = initVal;
for (int d = NUM_THREADS / 2; d > 0; d /= 2) {
val += __shfl_down_sync(mask, val, d, NUM_THREADS);
}
return val;
}
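// Illustrative trace: with NUM_THREADS = 4 and per-lane values {3, 1, 4, 1},
// the d = 2 step leaves lanes {7, 2, -, -} and the d = 1 step leaves lane 0
// holding 9 = 3 + 1 + 4 + 1. Only the first lane of each group ends up with
// the full sum, which is why callers read the result from lane 0.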
template <typename T, int BLOCK_SIZE>
__device__ T cooperativeSum(T const initVal, T* const buffer)
{
// first all warps reduce to single value
assert(BLOCK_SIZE % WARP_SIZE == 0);
assert(BLOCK_SIZE <= WARP_SIZE * WARP_SIZE);
T val = warpSum<T, WARP_SIZE>(initVal);
if (threadIdx.x % WARP_SIZE == 0) {
buffer[threadIdx.x / WARP_SIZE] = val;
}
__syncthreads();
if (threadIdx.x < (BLOCK_SIZE / WARP_SIZE)) {
val = warpSum<T, BLOCK_SIZE / WARP_SIZE>(buffer[threadIdx.x]);
}
return val;
}
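// E.g. with BLOCK_SIZE = 128: four warps each reduce 32 values, lane 0 of
// each warp deposits its partial sum into buffer[0..3], and threads 0..3
// combine them with a final 4-wide warpSum. As with warpSum, only thread 0
// holds the complete block total.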
/**
* @brief This kernel produces the block sizes for a prefixsum in a subsequent
* kernel.
*
* @tparam VALUE The value type.
* @tparam RUN The run count type.
* @param in The input data.
* @param numInDevice The size of the input data (device pointer).
* @param blockSize The location to write the block sizes (output).
*/
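// Worked example: for a first tile holding [5, 5, 2, 2, 9], valBuffer becomes
// [in[0] + 1, 5, 5, 2, 2, 9], boundaries are counted at positions 0, 2, and
// 4, and the block reports a size of 3 -- one per run starting in the tile.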
template <typename VALUE, typename RUN, int BLOCK_SIZE, int TILE_SIZE>
__global__ void rleInitKernel(
const VALUE* const in,
const size_t* const numInDevice,
RUN* const blockSize)
{
constexpr const int ITEMS_PER_THREAD = TILE_SIZE / BLOCK_SIZE;
// the algorithm here is to count the number of runs that start within this
// block's tile
const int num = static_cast<int>(*numInDevice);
if (blockIdx.x * TILE_SIZE < num) {
// we load the preceding value in the first spot
__shared__ VALUE valBuffer[TILE_SIZE + 1];
__shared__ RUN buffer[BLOCK_SIZE / WARP_SIZE];
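// Block 0 has no predecessor, so (in[0] + 1) serves as a sentinel: it is
// guaranteed to differ from in[0] (even under wraparound), forcing a run
// boundary at the very first element.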
if (threadIdx.x == 0) {
valBuffer[0]
= blockIdx.x > 0 ? in[blockIdx.x * TILE_SIZE - 1] : (in[0] + 1);
}
for (int tid = threadIdx.x; tid < TILE_SIZE; tid += BLOCK_SIZE) {
const int gTid = tid + blockIdx.x * TILE_SIZE;
// cooperatively populate valBuffer
if (gTid < num) {
valBuffer[tid + 1] = in[gTid];
} else {
valBuffer[tid + 1] = in[num - 1];
}
}
__syncthreads();
// build bit mask
VALUE val = valBuffer[threadIdx.x * ITEMS_PER_THREAD];
RUN sum = 0;
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
const int tid = threadIdx.x * ITEMS_PER_THREAD + i;
const VALUE nextVal = valBuffer[tid + 1];
sum += nextVal != val;
val = nextVal;
}
sum = cooperativeSum<RUN, BLOCK_SIZE>(sum, buffer);
if (threadIdx.x == 0) {
blockSize[blockIdx.x] = sum;
}
} else if (threadIdx.x == 0) {
blockSize[blockIdx.x] = 0;
}
if (blockIdx.x == gridDim.x - 1 && threadIdx.x == 0) {
blockSize[gridDim.x] = 0;
}
}
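/**
* @brief Compact each tile: recompute its run boundaries, prefix-sum them
* within the block, and scatter run values and lengths to the global output
* at the offsets given by blockPrefix. E.g. a tile [5, 5, 2, 2, 9] with
* offset 10 writes vals[10..12] = {5, 2, 9} and runs[10..12] = {2, 2, 1};
* the last run of a tile may continue into later tiles and is patched
* afterwards by rleFinalizeKernel.
*/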
template <typename VALUE, typename RUN, int BLOCK_SIZE, int TILE_SIZE>
__global__ void rleReduceKernel(
const VALUE* const in,
const size_t* const numInDevice,
const RUN* const blockPrefix,
RUN* const blockStart,
VALUE** const valsPtr,
RUN** const runsPtr,
size_t* const numOutDevice)
{
constexpr const int ITEMS_PER_THREAD = TILE_SIZE / BLOCK_SIZE;
// the algorithm here is to keep reducing "chunks" to a start and end marker
const int num = static_cast<int>(*numInDevice);
if (blockIdx.x * TILE_SIZE < num) {
VALUE* const vals = *valsPtr;
RUN* const runs = *runsPtr;
// we load the preceding value in the first spot
__shared__ VALUE valBuffer[TILE_SIZE + 1];
// we store the sum in the last spot
__shared__ RUN prefix[BLOCK_SIZE + 1];
if (threadIdx.x == 0) {
valBuffer[0]
= blockIdx.x > 0 ? in[blockIdx.x * TILE_SIZE - 1] : (in[0] + 1);
}
for (int tid = threadIdx.x; tid < TILE_SIZE; tid += BLOCK_SIZE) {
const int gTid = tid + blockIdx.x * TILE_SIZE;
// cooperatively populate valBuffer
if (gTid < num) {
valBuffer[tid + 1] = in[gTid];
} else {
valBuffer[tid + 1] = in[num - 1];
}
}
__syncthreads();
// build bit mask
RUN sum = 0;
{
VALUE val = valBuffer[threadIdx.x * ITEMS_PER_THREAD];
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
const int tid = threadIdx.x * ITEMS_PER_THREAD + i;
const VALUE nextVal = valBuffer[tid + 1];
sum += nextVal != val;
val = nextVal;
}
}
__syncthreads();
// prefixsum bit mask
{
typedef cub::BlockScan<RUN, BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).ExclusiveSum(sum, sum);
prefix[threadIdx.x] = sum;
if (threadIdx.x == 0) {
prefix[BLOCK_SIZE]
= blockPrefix[blockIdx.x + 1] - blockPrefix[blockIdx.x];
}
}
__syncthreads();
__shared__ RUN runBuffer[TILE_SIZE + 1];
// do local run length encoding with an undifferentiated run count
{
int outIdx = prefix[threadIdx.x];
VALUE val = valBuffer[threadIdx.x * ITEMS_PER_THREAD];
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
const int tid = threadIdx.x * ITEMS_PER_THREAD + i;
const VALUE nextVal = valBuffer[tid + 1];
if (nextVal != val) {
runBuffer[outIdx] = tid;
val = nextVal;
++outIdx;
}
}
}
const RUN numCompacted = prefix[BLOCK_SIZE];
if (threadIdx.x == 0) {
runBuffer[numCompacted] = ((blockIdx.x + 1) * TILE_SIZE >= num)
? ((num - 1) % TILE_SIZE) + 1
: TILE_SIZE;
}
__syncthreads();
// write back to global memory
const RUN offset = blockPrefix[blockIdx.x];
for (int tid = threadIdx.x; tid < numCompacted; tid += BLOCK_SIZE) {
// runs still need to be differentiated -- the last one will need to be the
// number of values in the tile
vals[offset + tid] = valBuffer[runBuffer[tid] + 1];
assert(runBuffer[tid + 1] >= runBuffer[tid]);
runs[offset + tid] = runBuffer[tid + 1] - runBuffer[tid];
}
if (threadIdx.x == 0) {
blockStart[blockIdx.x] = runBuffer[0] + blockIdx.x * TILE_SIZE;
}
}
if (blockIdx.x == gridDim.x - 1 && threadIdx.x == BLOCK_SIZE - 1) {
*numOutDevice = blockPrefix[gridDim.x];
}
}
/**
* @brief Fix block join gaps, that is, where the run count for a given number
* fails to account for duplicates in the following block(s). This requires
* that the first run in each block's output not be differentiated.
*
* @param runsPtr The almost finished runs.
* @param blockStart The first run boundary within each tile.
* @param blockPrefix The previously calculated block prefix.
* @param numInDevice The number of entries (device pointer).
*/
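// Gap example: if tile 0 ends with three 7s and tile 1 begins with two more,
// tile 1's first two elements extend a run it did not count; the binary
// search below finds the first tile sharing this prefix value, and the
// spilled-over element count is added onto runs[bp - 1].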
template <typename RUN, int BLOCK_SIZE, int TILE_SIZE>
__global__ void rleFinalizeKernel(
RUN** const runsPtr,
const RUN* const blockStart,
const RUN* const blockPrefix,
const size_t* const numInDevice)
{
const int num = roundUpDiv(static_cast<int>(*numInDevice), TILE_SIZE);
if (blockIdx.x * BLOCK_SIZE < num) {
RUN* const runs = *runsPtr;
// we load the blocks runs plus 1 extra
__shared__ RUN prefixBuffer[BLOCK_SIZE + 1];
int tid = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const RUN bp = tid < num ? blockPrefix[tid] : blockPrefix[num];
prefixBuffer[threadIdx.x] = bp;
if (threadIdx.x == 0) {
prefixBuffer[BLOCK_SIZE] = blockPrefix[(blockIdx.x + 1) * BLOCK_SIZE];
}
__syncthreads();
if (tid < num) {
if (bp > 0 && (tid + 1 == num || bp < prefixBuffer[threadIdx.x + 1])) {
// binary search for the lowest tile index whose exclusive prefix equals bp
int low = 0;
int high = tid;
while (high > low) {
const int mid = (low + high) / 2;
if (blockPrefix[mid] == bp) {
// keep searching down
high = mid;
} else {
// start searching up
low = mid + 1;
}
}
// we need to fix the count for this block
runs[bp - 1] += blockStart[tid] - low * TILE_SIZE;
}
}
}
}
} // namespace
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
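/**
* @brief Workspace needed by compressDownstreamInternal: combined space for
* the per-block bookkeeping arrays (blockSizes, blockPrefix, blockStart)
* with a 1024-entry floor, plus a trailing sizeof(int).
*/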
template <typename T>
size_t downstreamWorkspaceSize(const size_t num)
{
return sizeof(T) * std::max(1024ULL, 3ULL * roundUpDiv(num, GLOBAL_TILE_SIZE))
+ sizeof(int);
}
template <typename T, typename U>
size_t requiredWorkspaceSizeTyped(const size_t num)
{
// TODO: this assumes a large datatype
T* inPtr = nullptr;
T* valsPtr = nullptr;
U* runsPtr = nullptr;
size_t* numPtr = nullptr;
size_t workspaceSize = 0;
cudaError_t err = cub::DeviceRunLengthEncode::Encode(
nullptr,
workspaceSize,
inPtr,
valsPtr,
runsPtr,
numPtr,
static_cast<int>(num),
0);
if (err != cudaSuccess) {
throw std::runtime_error(
"Failed to get workspace size: " + std::to_string(err));
}
workspaceSize = std::max(workspaceSize, downstreamWorkspaceSize<U>(num));
return ALIGN_OFFSET + workspaceSize;
}
template <typename VALUE, typename COUNT>
void compressInternal(
void* const workspace,
const size_t workspaceSize,
void* const outValues,
void* const outCounts,
size_t* numOutDevice,
void const* const in,
size_t const num,
cudaStream_t stream)
{
VALUE* const outValuesTyped = static_cast<VALUE*>(outValues);
COUNT* const outCountsTyped = static_cast<COUNT*>(outCounts);
const VALUE* const inTyped = static_cast<const VALUE*>(in);
const size_t reqWorkspaceSize = RunLengthEncodeGPU::requiredWorkspaceSize(
num, getnvcompType<VALUE>(), getnvcompType<COUNT>());
if (workspaceSize < reqWorkspaceSize) {
throw std::runtime_error(
"Invalid workspace size: " + std::to_string(workspaceSize)
+ ", need at least " + std::to_string(reqWorkspaceSize));
}
void* const alignedWorkspace = align(workspace, ALIGN_OFFSET);
size_t alignedWorkspaceSize
= workspaceSize - relativeEndOffset(workspace, alignedWorkspace);
cudaError_t err = cub::DeviceRunLengthEncode::Encode(
alignedWorkspace,
alignedWorkspaceSize,
inTyped,
outValuesTyped,
outCountsTyped,
numOutDevice,
static_cast<int>(num),
stream);
if (err != cudaSuccess) {
throw std::runtime_error(
"Failed to schedule cub::DeviceRunLengthEncode::Encode() kernel: "
+ std::to_string(err));
}
}
template <typename VALUE, typename COUNT>
void compressDownstreamInternal(
void* const workspace,
const size_t workspaceSize,
void** const outValuesPtr,
void** const outCountsPtr,
size_t* numOutDevice,
void const* const in,
size_t const* numInDevice,
const size_t maxNum,
cudaStream_t stream)
{
VALUE** const outValuesTypedPtr = reinterpret_cast<VALUE**>(outValuesPtr);
COUNT** const outCountsTypedPtr = reinterpret_cast<COUNT**>(outCountsPtr);
const VALUE* const inTyped = static_cast<const VALUE*>(in);
const size_t reqWorkspaceSize = downstreamWorkspaceSize<COUNT>(maxNum);
if (workspaceSize < reqWorkspaceSize) {
throw std::runtime_error(
"Invalid workspace size: " + std::to_string(workspaceSize)
+ ", need at least " + std::to_string(reqWorkspaceSize));
}
constexpr const int BLOCK_SIZE = 128;
const dim3 grid(roundUpDiv(maxNum, GLOBAL_TILE_SIZE));
const dim3 block(BLOCK_SIZE);
TempSpaceBroker tempSpace(workspace, workspaceSize);
COUNT* blockSizes;
COUNT* blockPrefix;
COUNT* blockStart;
tempSpace.reserve(&blockSizes, grid.x + 1); // rleInitKernel also writes a trailing zero at index grid.x
tempSpace.reserve(&blockPrefix, grid.x + 1);
tempSpace.reserve(&blockStart, grid.x);
void* const scanWorkspace = tempSpace.next();
// TODO: expand such that the mask calculation is done across the entire
// array, then the prefix sum, and then the reduction
// get block sizes
rleInitKernel<VALUE, COUNT, BLOCK_SIZE, GLOBAL_TILE_SIZE>
<<<grid, block, 0, stream>>>(inTyped, numInDevice, blockSizes);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
throw std::runtime_error(
"Failed to launch rleInitKernel: " + std::to_string(err));
}
// get output locations
size_t requiredSpace;
err = cub::DeviceScan::ExclusiveSum(
nullptr, requiredSpace, blockSizes, blockPrefix, grid.x + 1, stream);
if (err != cudaSuccess) {
throw std::runtime_error(
"Failed to query rleScanKernel: " + std::to_string(err));
}
size_t scanWorkspaceSize
= std::max(1024 * sizeof(COUNT), maxNum * sizeof(COUNT));
if (requiredSpace > scanWorkspaceSize) {
throw std::runtime_error(
"Too little workspace: " + std::to_string(scanWorkspaceSize) + ", need "
+ std::to_string(requiredSpace));
}
err = cub::DeviceScan::ExclusiveSum(
scanWorkspace,
scanWorkspaceSize,
blockSizes,
blockPrefix,
grid.x + 1,
stream);
if (err != cudaSuccess) {
throw std::runtime_error(
"Failed to launch rleScanKernel: " + std::to_string(err)
+ ", with "
"grid.x = "
+ std::to_string(grid.x + 1) + " items.");
}
// do actual compaction
rleReduceKernel<VALUE, COUNT, BLOCK_SIZE, GLOBAL_TILE_SIZE>
<<<grid, block, 0, stream>>>(
inTyped,
numInDevice,
blockPrefix,
blockStart,
outValuesTypedPtr,
outCountsTypedPtr,
numOutDevice);
err = cudaGetLastError();
if (err != cudaSuccess) {
throw std::runtime_error(
"Failed to launch rleReduceKernel: " + std::to_string(err));
}
// fix gaps
rleFinalizeKernel<COUNT, BLOCK_SIZE, GLOBAL_TILE_SIZE>
<<<dim3(roundUpDiv(grid.x, block.x)), block, 0, stream>>>(
outCountsTypedPtr, blockStart, blockPrefix, numInDevice);
err = cudaGetLastError();
if (err != cudaSuccess) {
throw std::runtime_error(
"Failed to launch rleFinalizeKernel: " + std::to_string(err));
}
}
} // namespace
/******************************************************************************
* PUBLIC STATIC FUNCTIONS ****************************************************
*****************************************************************************/
void RunLengthEncodeGPU::compress(
void* workspace,
size_t workspaceSize,
nvcompType_t valueType,
void* const outValues,
nvcompType_t countType,
void* const outCounts,
size_t* const numOutDevice,
const void* const in,
const size_t num,
cudaStream_t stream)
{
NVCOMP_TYPE_TWO_SWITCH(
valueType,
countType,
compressInternal,
workspace,
workspaceSize,
outValues,
outCounts,
numOutDevice,
in,
num,
stream);
}
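/*
* Minimal host-side sketch of driving compress() (illustrative only; error
* checking omitted; assumes int values and counts, that NVCOMP_TYPE_INT is
* available from nvcomp.h, and that d_in already holds `num` ints on the
* device):
*
* const size_t wsSize = RunLengthEncodeGPU::requiredWorkspaceSize(
* num, NVCOMP_TYPE_INT, NVCOMP_TYPE_INT);
* void* ws;
* cudaMalloc(&ws, wsSize);
* int* dVals;
* int* dRuns;
* size_t* dNumOut;
* cudaMalloc(&dVals, num * sizeof(*dVals));
* cudaMalloc(&dRuns, num * sizeof(*dRuns));
* cudaMalloc(&dNumOut, sizeof(*dNumOut));
* RunLengthEncodeGPU::compress(
* ws, wsSize, NVCOMP_TYPE_INT, dVals, NVCOMP_TYPE_INT, dRuns, dNumOut,
* d_in, num, stream);
* size_t numOut;
* cudaStreamSynchronize(stream);
* cudaMemcpy(&numOut, dNumOut, sizeof(numOut), cudaMemcpyDeviceToHost);
*/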
void RunLengthEncodeGPU::compressDownstream(
void* workspace,
size_t workspaceSize,
nvcompType_t valueType,
void** const outValuesPtr,
nvcompType_t countType,
void** const outCountsPtr,
size_t* const numOutDevice,
const void* const in,
const size_t* numInDevice,
const size_t maxNum,
cudaStream_t stream)
{
NVCOMP_TYPE_TWO_SWITCH(
valueType,
countType,
compressDownstreamInternal,
workspace,
workspaceSize,
outValuesPtr,
outCountsPtr,
numOutDevice,
in,
numInDevice,
maxNum,
stream);
}
size_t RunLengthEncodeGPU::requiredWorkspaceSize(
const size_t num, const nvcompType_t valueType, const nvcompType_t runType)
{
NVCOMP_TYPE_TWO_SWITCH_RETURN(
valueType, runType, requiredWorkspaceSizeTyped, num);
}
} // namespace nvcomp
|