hip_filename (string, lengths 5 to 84) | hip_content (string, lengths 79 to 9.69M) | cuda_filename (string, lengths 4 to 83) | cuda_content (string, lengths 19 to 9.69M) |
---|---|---|---|
04d7a3b439d1d62003924085e9b116e0c78cc7a7.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2023 www.open3d.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/hashmap/HashBackendBuffer.h"
namespace open3d {
namespace core {
void CUDAResetHeap(Tensor &heap) {
uint32_t *heap_ptr = heap.GetDataPtr<uint32_t>();
thrust::sequence(thrust::device, heap_ptr, heap_ptr + heap.GetLength(), 0);
OPEN3D_CUDA_CHECK(hipGetLastError());
}
} // namespace core
} // namespace open3d
| 04d7a3b439d1d62003924085e9b116e0c78cc7a7.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2023 www.open3d.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/hashmap/HashBackendBuffer.h"
namespace open3d {
namespace core {
void CUDAResetHeap(Tensor &heap) {
uint32_t *heap_ptr = heap.GetDataPtr<uint32_t>();
thrust::sequence(thrust::device, heap_ptr, heap_ptr + heap.GetLength(), 0);
OPEN3D_CUDA_CHECK(cudaGetLastError());
}
} // namespace core
} // namespace open3d
|
a93f953a7b19de69aaa8ab15670e1082f301d069.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcSoilViscositySO.cu
*
* Created on: 15-04-2015
* Author: Kamil Szewc
*
*/
#include "../../sph.h"
#include "../../hlp.h"
__global__ void calcSoilViscositySO(Particle *p, Parameters *par)
{
unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
while (tid < par->N) {
if ((p[tid].phaseType == 1) || (p[tid].phaseType == -1))
{
real strainRate = sqrt(4.0) * sqrt( pow2(p[tid].str.x) + pow2(p[tid].str.y + p[tid].str.z) + pow2(p[tid].str.w) );
real nut = (par->SOIL_COHESION + p[tid].p * tan(par->SOIL_INTERNAL_ANGLE)) / strainRate;
p[tid].nut = par->SOIL_MAXIMAL_VISCOSITY / p[tid].d;
if ( nut < par->SOIL_MAXIMAL_VISCOSITY )
{
p[tid].nut = nut / p[tid].d;
}
if (nut < par->SOIL_MINIMAL_VISCOSITY)
{
p[tid].nut = par->SOIL_MINIMAL_VISCOSITY / p[tid].d;
}
}
tid += blockDim.x * gridDim.x;
}
}
| a93f953a7b19de69aaa8ab15670e1082f301d069.cu | /*
* calcSoilViscositySO.cu
*
* Created on: 15-04-2015
* Author: Kamil Szewc
*
*/
#include "../../sph.h"
#include "../../hlp.h"
__global__ void calcSoilViscositySO(Particle *p, Parameters *par)
{
unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
while (tid < par->N) {
if ((p[tid].phaseType == 1) || (p[tid].phaseType == -1))
{
real strainRate = sqrt(4.0) * sqrt( pow2(p[tid].str.x) + pow2(p[tid].str.y + p[tid].str.z) + pow2(p[tid].str.w) );
real nut = (par->SOIL_COHESION + p[tid].p * tan(par->SOIL_INTERNAL_ANGLE)) / strainRate;
p[tid].nut = par->SOIL_MAXIMAL_VISCOSITY / p[tid].d;
if ( nut < par->SOIL_MAXIMAL_VISCOSITY )
{
p[tid].nut = nut / p[tid].d;
}
if (nut < par->SOIL_MINIMAL_VISCOSITY)
{
p[tid].nut = par->SOIL_MINIMAL_VISCOSITY / p[tid].d;
}
}
tid += blockDim.x * gridDim.x;
}
}
|
6be1e9bf7b12f0c987bf8a77d55e4a3a8e876301.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 6be1e9bf7b12f0c987bf8a77d55e4a3a8e876301.cu | #include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
6879e4268f7c5c269cd12495592962cb061c83e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void j3d125pt (double * __restrict__ t_in, double * __restrict__ t_out, int N) {
//Determing the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 2;
int i = max(i0,2) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 2;
int j = max(j0,2) + 4*(int)(threadIdx.y);
int k0 = (int)(blockIdx.z)*(int)(blockDim.z) + 2;
int k = max(k0,2) + (int)(threadIdx.z);
double (*in)[516][516] = (double (*)[516][516])t_in;
double (*out)[516][516] = (double (*)[516][516])t_out;
if (i>=2 && i<=N-3 && j>=2 && j<=N-3 && k>=2 && k<=N-3) {
double outkc0jc0ic0 = 0.75 * in[k-2][j-2][i-2];
outkc0jc0ic0 += 1.132 * in[k-2][j-2][i-1];
outkc0jc0ic0 += 0.217 * in[k-2][j-2][i];
outkc0jc0ic0 += 1.132 * in[k-2][j-2][i+1];
outkc0jc0ic0 += 0.75 * in[k-2][j-2][i+2];
outkc0jc0ic0 += 1.132 * in[k-1][j-2][i-2];
outkc0jc0ic0 += 0.331 * in[k-1][j-2][i-1];
outkc0jc0ic0 += 2.13 * in[k-1][j-2][i];
outkc0jc0ic0 += 0.331 * in[k-1][j-2][i+1];
outkc0jc0ic0 += 1.132 * in[k-1][j-2][i+2];
outkc0jc0ic0 += 0.217 * in[k][j-2][i-2];
outkc0jc0ic0 += 2.13 * in[k][j-2][i-1];
outkc0jc0ic0 += 1.132 * in[k][j-2][i];
outkc0jc0ic0 += 2.13 * in[k][j-2][i+1];
outkc0jc0ic0 += 0.217 * in[k][j-2][i+2];
outkc0jc0ic0 += 1.132 * in[k+1][j-2][i-2];
outkc0jc0ic0 += 0.331 * in[k+1][j-2][i-1];
outkc0jc0ic0 += 2.13 * in[k+1][j-2][i];
outkc0jc0ic0 += 0.331 * in[k+1][j-2][i+1];
outkc0jc0ic0 += 1.132 * in[k+1][j-2][i+2];
outkc0jc0ic0 += 0.75 * in[k+2][j-2][i-2];
outkc0jc0ic0 += 1.132 * in[k+2][j-2][i-1];
outkc0jc0ic0 += 0.217 * in[k+2][j-2][i];
outkc0jc0ic0 += 1.132 * in[k+2][j-2][i+1];
outkc0jc0ic0 += 0.75 * in[k+2][j-2][i+2];
outkc0jc0ic0 += 1.132 * in[k-2][j-1][i-2];
double outkc0jp1ic0 = 0.75 * in[k-2][j-1][i-2];
outkc0jc0ic0 += 0.331 * in[k-2][j-1][i-1];
outkc0jp1ic0 += 1.132 * in[k-2][j-1][i-1];
outkc0jc0ic0 += 2.13 * in[k-2][j-1][i];
outkc0jp1ic0 += 0.217 * in[k-2][j-1][i];
outkc0jc0ic0 += 0.331 * in[k-2][j-1][i+1];
outkc0jp1ic0 += 1.132 * in[k-2][j-1][i+1];
outkc0jc0ic0 += 1.132 * in[k-2][j-1][i+2];
outkc0jp1ic0 += 0.75 * in[k-2][j-1][i+2];
outkc0jc0ic0 += 0.217 * in[k-2][j][i-2];
outkc0jp1ic0 += 1.132 * in[k-2][j][i-2];
double outkc0jp2ic0 = 0.75 * in[k-2][j][i-2];
outkc0jc0ic0 += 2.13 * in[k-2][j][i-1];
outkc0jp1ic0 += 0.331 * in[k-2][j][i-1];
outkc0jp2ic0 += 1.132 * in[k-2][j][i-1];
outkc0jc0ic0 += 1.132 * in[k-2][j][i];
outkc0jp1ic0 += 2.13 * in[k-2][j][i];
outkc0jp2ic0 += 0.217 * in[k-2][j][i];
outkc0jc0ic0 += 2.13 * in[k-2][j][i+1];
outkc0jp1ic0 += 0.331 * in[k-2][j][i+1];
outkc0jp2ic0 += 1.132 * in[k-2][j][i+1];
outkc0jc0ic0 += 0.217 * in[k-2][j][i+2];
outkc0jp1ic0 += 1.132 * in[k-2][j][i+2];
outkc0jp2ic0 += 0.75 * in[k-2][j][i+2];
outkc0jc0ic0 += 1.132 * in[k-2][j+1][i-2];
outkc0jp1ic0 += 0.217 * in[k-2][j+1][i-2];
outkc0jp2ic0 += 1.132 * in[k-2][j+1][i-2];
double outkc0jp3ic0 = 0.75 * in[k-2][j+1][i-2];
outkc0jc0ic0 += 0.331 * in[k-2][j+1][i-1];
outkc0jp1ic0 += 2.13 * in[k-2][j+1][i-1];
outkc0jp2ic0 += 0.331 * in[k-2][j+1][i-1];
outkc0jp3ic0 += 1.132 * in[k-2][j+1][i-1];
outkc0jc0ic0 += 2.13 * in[k-2][j+1][i];
outkc0jp1ic0 += 1.132 * in[k-2][j+1][i];
outkc0jp2ic0 += 2.13 * in[k-2][j+1][i];
outkc0jp3ic0 += 0.217 * in[k-2][j+1][i];
outkc0jc0ic0 += 0.331 * in[k-2][j+1][i+1];
outkc0jp1ic0 += 2.13 * in[k-2][j+1][i+1];
outkc0jp2ic0 += 0.331 * in[k-2][j+1][i+1];
outkc0jp3ic0 += 1.132 * in[k-2][j+1][i+1];
outkc0jc0ic0 += 1.132 * in[k-2][j+1][i+2];
outkc0jp1ic0 += 0.217 * in[k-2][j+1][i+2];
outkc0jp2ic0 += 1.132 * in[k-2][j+1][i+2];
outkc0jp3ic0 += 0.75 * in[k-2][j+1][i+2];
outkc0jc0ic0 += 0.75 * in[k-2][j+2][i-2];
outkc0jp1ic0 += 1.132 * in[k-2][j+2][i-2];
outkc0jp2ic0 += 0.217 * in[k-2][j+2][i-2];
outkc0jp3ic0 += 1.132 * in[k-2][j+2][i-2];
outkc0jc0ic0 += 1.132 * in[k-2][j+2][i-1];
outkc0jp1ic0 += 0.331 * in[k-2][j+2][i-1];
outkc0jp2ic0 += 2.13 * in[k-2][j+2][i-1];
outkc0jp3ic0 += 0.331 * in[k-2][j+2][i-1];
outkc0jc0ic0 += 0.217 * in[k-2][j+2][i];
outkc0jp1ic0 += 2.13 * in[k-2][j+2][i];
outkc0jp2ic0 += 1.132 * in[k-2][j+2][i];
outkc0jp3ic0 += 2.13 * in[k-2][j+2][i];
outkc0jc0ic0 += 1.132 * in[k-2][j+2][i+1];
outkc0jp1ic0 += 0.331 * in[k-2][j+2][i+1];
outkc0jp2ic0 += 2.13 * in[k-2][j+2][i+1];
outkc0jp3ic0 += 0.331 * in[k-2][j+2][i+1];
outkc0jc0ic0 += 0.75 * in[k-2][j+2][i+2];
outkc0jp1ic0 += 1.132 * in[k-2][j+2][i+2];
outkc0jp2ic0 += 0.217 * in[k-2][j+2][i+2];
outkc0jp3ic0 += 1.132 * in[k-2][j+2][i+2];
outkc0jc0ic0 += 0.331 * in[k-1][j-1][i-2];
outkc0jp1ic0 += 1.132 * in[k-1][j-1][i-2];
outkc0jc0ic0 += 0.75 * in[k-1][j-1][i-1];
outkc0jp1ic0 += 0.331 * in[k-1][j-1][i-1];
outkc0jc0ic0 += 0.217 * in[k-1][j-1][i];
outkc0jp1ic0 += 2.13 * in[k-1][j-1][i];
outkc0jc0ic0 += 0.75 * in[k-1][j-1][i+1];
outkc0jp1ic0 += 0.331 * in[k-1][j-1][i+1];
outkc0jc0ic0 += 0.331 * in[k-1][j-1][i+2];
outkc0jp1ic0 += 1.132 * in[k-1][j-1][i+2];
outkc0jc0ic0 += 2.13 * in[k-1][j][i-2];
outkc0jp1ic0 += 0.331 * in[k-1][j][i-2];
outkc0jp2ic0 += 1.132 * in[k-1][j][i-2];
outkc0jc0ic0 += 0.217 * in[k-1][j][i-1];
outkc0jp1ic0 += 0.75 * in[k-1][j][i-1];
outkc0jp2ic0 += 0.331 * in[k-1][j][i-1];
outkc0jc0ic0 += 0.331 * in[k-1][j][i];
outkc0jp1ic0 += 0.217 * in[k-1][j][i];
outkc0jp2ic0 += 2.13 * in[k-1][j][i];
outkc0jc0ic0 += 0.217 * in[k-1][j][i+1];
outkc0jp1ic0 += 0.75 * in[k-1][j][i+1];
outkc0jp2ic0 += 0.331 * in[k-1][j][i+1];
outkc0jc0ic0 += 2.13 * in[k-1][j][i+2];
outkc0jp1ic0 += 0.331 * in[k-1][j][i+2];
outkc0jp2ic0 += 1.132 * in[k-1][j][i+2];
outkc0jc0ic0 += 0.331 * in[k-1][j+1][i-2];
outkc0jp1ic0 += 2.13 * in[k-1][j+1][i-2];
outkc0jp2ic0 += 0.331 * in[k-1][j+1][i-2];
outkc0jp3ic0 += 1.132 * in[k-1][j+1][i-2];
outkc0jc0ic0 += 0.75 * in[k-1][j+1][i-1];
outkc0jp1ic0 += 0.217 * in[k-1][j+1][i-1];
outkc0jp2ic0 += 0.75 * in[k-1][j+1][i-1];
outkc0jp3ic0 += 0.331 * in[k-1][j+1][i-1];
outkc0jc0ic0 += 0.217 * in[k-1][j+1][i];
outkc0jp1ic0 += 0.331 * in[k-1][j+1][i];
outkc0jp2ic0 += 0.217 * in[k-1][j+1][i];
outkc0jp3ic0 += 2.13 * in[k-1][j+1][i];
outkc0jc0ic0 += 0.75 * in[k-1][j+1][i+1];
outkc0jp1ic0 += 0.217 * in[k-1][j+1][i+1];
outkc0jp2ic0 += 0.75 * in[k-1][j+1][i+1];
outkc0jp3ic0 += 0.331 * in[k-1][j+1][i+1];
outkc0jc0ic0 += 0.331 * in[k-1][j+1][i+2];
outkc0jp1ic0 += 2.13 * in[k-1][j+1][i+2];
outkc0jp2ic0 += 0.331 * in[k-1][j+1][i+2];
outkc0jp3ic0 += 1.132 * in[k-1][j+1][i+2];
outkc0jc0ic0 += 1.132 * in[k-1][j+2][i-2];
outkc0jp1ic0 += 0.331 * in[k-1][j+2][i-2];
outkc0jp2ic0 += 2.13 * in[k-1][j+2][i-2];
outkc0jp3ic0 += 0.331 * in[k-1][j+2][i-2];
outkc0jc0ic0 += 0.331 * in[k-1][j+2][i-1];
outkc0jp1ic0 += 0.75 * in[k-1][j+2][i-1];
outkc0jp2ic0 += 0.217 * in[k-1][j+2][i-1];
outkc0jp3ic0 += 0.75 * in[k-1][j+2][i-1];
outkc0jc0ic0 += 2.13 * in[k-1][j+2][i];
outkc0jp1ic0 += 0.217 * in[k-1][j+2][i];
outkc0jp2ic0 += 0.331 * in[k-1][j+2][i];
outkc0jp3ic0 += 0.217 * in[k-1][j+2][i];
outkc0jc0ic0 += 0.331 * in[k-1][j+2][i+1];
outkc0jp1ic0 += 0.75 * in[k-1][j+2][i+1];
outkc0jp2ic0 += 0.217 * in[k-1][j+2][i+1];
outkc0jp3ic0 += 0.75 * in[k-1][j+2][i+1];
outkc0jc0ic0 += 1.132 * in[k-1][j+2][i+2];
outkc0jp1ic0 += 0.331 * in[k-1][j+2][i+2];
outkc0jp2ic0 += 2.13 * in[k-1][j+2][i+2];
outkc0jp3ic0 += 0.331 * in[k-1][j+2][i+2];
outkc0jc0ic0 += 2.13 * in[k][j-1][i-2];
outkc0jp1ic0 += 0.217 * in[k][j-1][i-2];
outkc0jc0ic0 += 0.331 * in[k][j-1][i-1];
outkc0jp1ic0 += 2.13 * in[k][j-1][i-1];
outkc0jc0ic0 += 0.75 * in[k][j-1][i];
outkc0jp1ic0 += 1.132 * in[k][j-1][i];
outkc0jc0ic0 += 0.331 * in[k][j-1][i+1];
outkc0jp1ic0 += 2.13 * in[k][j-1][i+1];
outkc0jc0ic0 += 2.13 * in[k][j-1][i+2];
outkc0jp1ic0 += 0.217 * in[k][j-1][i+2];
outkc0jc0ic0 += 1.132 * in[k][j][i-2];
outkc0jp1ic0 += 2.13 * in[k][j][i-2];
outkc0jp2ic0 += 0.217 * in[k][j][i-2];
outkc0jc0ic0 += 0.75 * in[k][j][i-1];
outkc0jp1ic0 += 0.331 * in[k][j][i-1];
outkc0jp2ic0 += 2.13 * in[k][j][i-1];
outkc0jc0ic0 += 2.13 * in[k][j][i];
outkc0jp1ic0 += 0.75 * in[k][j][i];
outkc0jp2ic0 += 1.132 * in[k][j][i];
outkc0jc0ic0 += 0.75 * in[k][j][i+1];
outkc0jp1ic0 += 0.331 * in[k][j][i+1];
outkc0jp2ic0 += 2.13 * in[k][j][i+1];
outkc0jc0ic0 += 1.132 * in[k][j][i+2];
outkc0jp1ic0 += 2.13 * in[k][j][i+2];
outkc0jp2ic0 += 0.217 * in[k][j][i+2];
outkc0jc0ic0 += 2.13 * in[k][j+1][i-2];
outkc0jp1ic0 += 1.132 * in[k][j+1][i-2];
outkc0jp2ic0 += 2.13 * in[k][j+1][i-2];
outkc0jp3ic0 += 0.217 * in[k][j+1][i-2];
outkc0jc0ic0 += 0.331 * in[k][j+1][i-1];
outkc0jp1ic0 += 0.75 * in[k][j+1][i-1];
outkc0jp2ic0 += 0.331 * in[k][j+1][i-1];
outkc0jp3ic0 += 2.13 * in[k][j+1][i-1];
outkc0jc0ic0 += 0.75 * in[k][j+1][i];
outkc0jp1ic0 += 2.13 * in[k][j+1][i];
outkc0jp2ic0 += 0.75 * in[k][j+1][i];
outkc0jp3ic0 += 1.132 * in[k][j+1][i];
outkc0jc0ic0 += 0.331 * in[k][j+1][i+1];
outkc0jp1ic0 += 0.75 * in[k][j+1][i+1];
outkc0jp2ic0 += 0.331 * in[k][j+1][i+1];
outkc0jp3ic0 += 2.13 * in[k][j+1][i+1];
outkc0jc0ic0 += 2.13 * in[k][j+1][i+2];
outkc0jp1ic0 += 1.132 * in[k][j+1][i+2];
outkc0jp2ic0 += 2.13 * in[k][j+1][i+2];
outkc0jp3ic0 += 0.217 * in[k][j+1][i+2];
outkc0jc0ic0 += 0.217 * in[k][j+2][i-2];
outkc0jp1ic0 += 2.13 * in[k][j+2][i-2];
outkc0jp2ic0 += 1.132 * in[k][j+2][i-2];
outkc0jp3ic0 += 2.13 * in[k][j+2][i-2];
outkc0jc0ic0 += 2.13 * in[k][j+2][i-1];
outkc0jp1ic0 += 0.331 * in[k][j+2][i-1];
outkc0jp2ic0 += 0.75 * in[k][j+2][i-1];
outkc0jp3ic0 += 0.331 * in[k][j+2][i-1];
outkc0jc0ic0 += 1.132 * in[k][j+2][i];
outkc0jp1ic0 += 0.75 * in[k][j+2][i];
outkc0jp2ic0 += 2.13 * in[k][j+2][i];
outkc0jp3ic0 += 0.75 * in[k][j+2][i];
outkc0jc0ic0 += 2.13 * in[k][j+2][i+1];
outkc0jp1ic0 += 0.331 * in[k][j+2][i+1];
outkc0jp2ic0 += 0.75 * in[k][j+2][i+1];
outkc0jp3ic0 += 0.331 * in[k][j+2][i+1];
outkc0jc0ic0 += 0.217 * in[k][j+2][i+2];
outkc0jp1ic0 += 2.13 * in[k][j+2][i+2];
outkc0jp2ic0 += 1.132 * in[k][j+2][i+2];
outkc0jp3ic0 += 2.13 * in[k][j+2][i+2];
outkc0jc0ic0 += 0.331 * in[k+1][j-1][i-2];
outkc0jp1ic0 += 1.132 * in[k+1][j-1][i-2];
outkc0jc0ic0 += 0.75 * in[k+1][j-1][i-1];
outkc0jp1ic0 += 0.331 * in[k+1][j-1][i-1];
outkc0jc0ic0 += 0.217 * in[k+1][j-1][i];
outkc0jp1ic0 += 2.13 * in[k+1][j-1][i];
outkc0jc0ic0 += 0.75 * in[k+1][j-1][i+1];
outkc0jp1ic0 += 0.331 * in[k+1][j-1][i+1];
outkc0jc0ic0 += 0.331 * in[k+1][j-1][i+2];
outkc0jp1ic0 += 1.132 * in[k+1][j-1][i+2];
outkc0jc0ic0 += 2.13 * in[k+1][j][i-2];
outkc0jp1ic0 += 0.331 * in[k+1][j][i-2];
outkc0jp2ic0 += 1.132 * in[k+1][j][i-2];
outkc0jc0ic0 += 0.217 * in[k+1][j][i-1];
outkc0jp1ic0 += 0.75 * in[k+1][j][i-1];
outkc0jp2ic0 += 0.331 * in[k+1][j][i-1];
outkc0jc0ic0 += 0.331 * in[k+1][j][i];
outkc0jp1ic0 += 0.217 * in[k+1][j][i];
outkc0jp2ic0 += 2.13 * in[k+1][j][i];
outkc0jc0ic0 += 0.217 * in[k+1][j][i+1];
outkc0jp1ic0 += 0.75 * in[k+1][j][i+1];
outkc0jp2ic0 += 0.331 * in[k+1][j][i+1];
outkc0jc0ic0 += 2.13 * in[k+1][j][i+2];
outkc0jp1ic0 += 0.331 * in[k+1][j][i+2];
outkc0jp2ic0 += 1.132 * in[k+1][j][i+2];
outkc0jc0ic0 += 0.331 * in[k+1][j+1][i-2];
outkc0jp1ic0 += 2.13 * in[k+1][j+1][i-2];
outkc0jp2ic0 += 0.331 * in[k+1][j+1][i-2];
outkc0jp3ic0 += 1.132 * in[k+1][j+1][i-2];
outkc0jc0ic0 += 0.75 * in[k+1][j+1][i-1];
outkc0jp1ic0 += 0.217 * in[k+1][j+1][i-1];
outkc0jp2ic0 += 0.75 * in[k+1][j+1][i-1];
outkc0jp3ic0 += 0.331 * in[k+1][j+1][i-1];
outkc0jc0ic0 += 0.217 * in[k+1][j+1][i];
outkc0jp1ic0 += 0.331 * in[k+1][j+1][i];
outkc0jp2ic0 += 0.217 * in[k+1][j+1][i];
outkc0jp3ic0 += 2.13 * in[k+1][j+1][i];
outkc0jc0ic0 += 0.75 * in[k+1][j+1][i+1];
outkc0jp1ic0 += 0.217 * in[k+1][j+1][i+1];
outkc0jp2ic0 += 0.75 * in[k+1][j+1][i+1];
outkc0jp3ic0 += 0.331 * in[k+1][j+1][i+1];
outkc0jc0ic0 += 0.331 * in[k+1][j+1][i+2];
outkc0jp1ic0 += 2.13 * in[k+1][j+1][i+2];
outkc0jp2ic0 += 0.331 * in[k+1][j+1][i+2];
outkc0jp3ic0 += 1.132 * in[k+1][j+1][i+2];
outkc0jc0ic0 += 1.132 * in[k+1][j+2][i-2];
outkc0jp1ic0 += 0.331 * in[k+1][j+2][i-2];
outkc0jp2ic0 += 2.13 * in[k+1][j+2][i-2];
outkc0jp3ic0 += 0.331 * in[k+1][j+2][i-2];
outkc0jc0ic0 += 0.331 * in[k+1][j+2][i-1];
outkc0jp1ic0 += 0.75 * in[k+1][j+2][i-1];
outkc0jp2ic0 += 0.217 * in[k+1][j+2][i-1];
outkc0jp3ic0 += 0.75 * in[k+1][j+2][i-1];
outkc0jc0ic0 += 2.13 * in[k+1][j+2][i];
outkc0jp1ic0 += 0.217 * in[k+1][j+2][i];
outkc0jp2ic0 += 0.331 * in[k+1][j+2][i];
outkc0jp3ic0 += 0.217 * in[k+1][j+2][i];
outkc0jc0ic0 += 0.331 * in[k+1][j+2][i+1];
outkc0jp1ic0 += 0.75 * in[k+1][j+2][i+1];
outkc0jp2ic0 += 0.217 * in[k+1][j+2][i+1];
outkc0jp3ic0 += 0.75 * in[k+1][j+2][i+1];
outkc0jc0ic0 += 1.132 * in[k+1][j+2][i+2];
outkc0jp1ic0 += 0.331 * in[k+1][j+2][i+2];
outkc0jp2ic0 += 2.13 * in[k+1][j+2][i+2];
outkc0jp3ic0 += 0.331 * in[k+1][j+2][i+2];
outkc0jc0ic0 += 1.132 * in[k+2][j-1][i-2];
outkc0jp1ic0 += 0.75 * in[k+2][j-1][i-2];
outkc0jc0ic0 += 0.331 * in[k+2][j-1][i-1];
outkc0jp1ic0 += 1.132 * in[k+2][j-1][i-1];
outkc0jc0ic0 += 2.13 * in[k+2][j-1][i];
outkc0jp1ic0 += 0.217 * in[k+2][j-1][i];
outkc0jc0ic0 += 0.331 * in[k+2][j-1][i+1];
outkc0jp1ic0 += 1.132 * in[k+2][j-1][i+1];
outkc0jc0ic0 += 1.132 * in[k+2][j-1][i+2];
outkc0jp1ic0 += 0.75 * in[k+2][j-1][i+2];
outkc0jc0ic0 += 0.217 * in[k+2][j][i-2];
outkc0jp1ic0 += 1.132 * in[k+2][j][i-2];
outkc0jp2ic0 += 0.75 * in[k+2][j][i-2];
outkc0jc0ic0 += 2.13 * in[k+2][j][i-1];
outkc0jp1ic0 += 0.331 * in[k+2][j][i-1];
outkc0jp2ic0 += 1.132 * in[k+2][j][i-1];
outkc0jc0ic0 += 1.132 * in[k+2][j][i];
outkc0jp1ic0 += 2.13 * in[k+2][j][i];
outkc0jp2ic0 += 0.217 * in[k+2][j][i];
outkc0jc0ic0 += 2.13 * in[k+2][j][i+1];
outkc0jp1ic0 += 0.331 * in[k+2][j][i+1];
outkc0jp2ic0 += 1.132 * in[k+2][j][i+1];
outkc0jc0ic0 += 0.217 * in[k+2][j][i+2];
outkc0jp1ic0 += 1.132 * in[k+2][j][i+2];
outkc0jp2ic0 += 0.75 * in[k+2][j][i+2];
outkc0jc0ic0 += 1.132 * in[k+2][j+1][i-2];
outkc0jp1ic0 += 0.217 * in[k+2][j+1][i-2];
outkc0jp2ic0 += 1.132 * in[k+2][j+1][i-2];
outkc0jp3ic0 += 0.75 * in[k+2][j+1][i-2];
outkc0jc0ic0 += 0.331 * in[k+2][j+1][i-1];
outkc0jp1ic0 += 2.13 * in[k+2][j+1][i-1];
outkc0jp2ic0 += 0.331 * in[k+2][j+1][i-1];
outkc0jp3ic0 += 1.132 * in[k+2][j+1][i-1];
outkc0jc0ic0 += 2.13 * in[k+2][j+1][i];
outkc0jp1ic0 += 1.132 * in[k+2][j+1][i];
outkc0jp2ic0 += 2.13 * in[k+2][j+1][i];
outkc0jp3ic0 += 0.217 * in[k+2][j+1][i];
outkc0jc0ic0 += 0.331 * in[k+2][j+1][i+1];
outkc0jp1ic0 += 2.13 * in[k+2][j+1][i+1];
outkc0jp2ic0 += 0.331 * in[k+2][j+1][i+1];
outkc0jp3ic0 += 1.132 * in[k+2][j+1][i+1];
outkc0jc0ic0 += 1.132 * in[k+2][j+1][i+2];
outkc0jp1ic0 += 0.217 * in[k+2][j+1][i+2];
outkc0jp2ic0 += 1.132 * in[k+2][j+1][i+2];
outkc0jp3ic0 += 0.75 * in[k+2][j+1][i+2];
outkc0jc0ic0 += 0.75 * in[k+2][j+2][i-2];
outkc0jp1ic0 += 1.132 * in[k+2][j+2][i-2];
outkc0jp2ic0 += 0.217 * in[k+2][j+2][i-2];
outkc0jp3ic0 += 1.132 * in[k+2][j+2][i-2];
outkc0jc0ic0 += 1.132 * in[k+2][j+2][i-1];
outkc0jp1ic0 += 0.331 * in[k+2][j+2][i-1];
outkc0jp2ic0 += 2.13 * in[k+2][j+2][i-1];
outkc0jp3ic0 += 0.331 * in[k+2][j+2][i-1];
outkc0jc0ic0 += 0.217 * in[k+2][j+2][i];
outkc0jp1ic0 += 2.13 * in[k+2][j+2][i];
outkc0jp2ic0 += 1.132 * in[k+2][j+2][i];
outkc0jp3ic0 += 2.13 * in[k+2][j+2][i];
outkc0jc0ic0 += 1.132 * in[k+2][j+2][i+1];
outkc0jp1ic0 += 0.331 * in[k+2][j+2][i+1];
outkc0jp2ic0 += 2.13 * in[k+2][j+2][i+1];
outkc0jp3ic0 += 0.331 * in[k+2][j+2][i+1];
outkc0jc0ic0 += 0.75 * in[k+2][j+2][i+2];
outkc0jp1ic0 += 1.132 * in[k+2][j+2][i+2];
outkc0jp2ic0 += 0.217 * in[k+2][j+2][i+2];
outkc0jp3ic0 += 1.132 * in[k+2][j+2][i+2];
outkc0jp1ic0 += 0.75 * in[k-2][j+3][i-2];
outkc0jp2ic0 += 1.132 * in[k-2][j+3][i-2];
outkc0jp3ic0 += 0.217 * in[k-2][j+3][i-2];
outkc0jp1ic0 += 1.132 * in[k-2][j+3][i-1];
outkc0jp2ic0 += 0.331 * in[k-2][j+3][i-1];
outkc0jp3ic0 += 2.13 * in[k-2][j+3][i-1];
outkc0jp1ic0 += 0.217 * in[k-2][j+3][i];
outkc0jp2ic0 += 2.13 * in[k-2][j+3][i];
outkc0jp3ic0 += 1.132 * in[k-2][j+3][i];
outkc0jp1ic0 += 1.132 * in[k-2][j+3][i+1];
outkc0jp2ic0 += 0.331 * in[k-2][j+3][i+1];
outkc0jp3ic0 += 2.13 * in[k-2][j+3][i+1];
outkc0jp1ic0 += 0.75 * in[k-2][j+3][i+2];
outkc0jp2ic0 += 1.132 * in[k-2][j+3][i+2];
outkc0jp3ic0 += 0.217 * in[k-2][j+3][i+2];
outkc0jp1ic0 += 1.132 * in[k-1][j+3][i-2];
outkc0jp2ic0 += 0.331 * in[k-1][j+3][i-2];
outkc0jp3ic0 += 2.13 * in[k-1][j+3][i-2];
outkc0jp1ic0 += 0.331 * in[k-1][j+3][i-1];
outkc0jp2ic0 += 0.75 * in[k-1][j+3][i-1];
outkc0jp3ic0 += 0.217 * in[k-1][j+3][i-1];
outkc0jp1ic0 += 2.13 * in[k-1][j+3][i];
outkc0jp2ic0 += 0.217 * in[k-1][j+3][i];
outkc0jp3ic0 += 0.331 * in[k-1][j+3][i];
outkc0jp1ic0 += 0.331 * in[k-1][j+3][i+1];
outkc0jp2ic0 += 0.75 * in[k-1][j+3][i+1];
outkc0jp3ic0 += 0.217 * in[k-1][j+3][i+1];
outkc0jp1ic0 += 1.132 * in[k-1][j+3][i+2];
outkc0jp2ic0 += 0.331 * in[k-1][j+3][i+2];
outkc0jp3ic0 += 2.13 * in[k-1][j+3][i+2];
outkc0jp1ic0 += 0.217 * in[k][j+3][i-2];
outkc0jp2ic0 += 2.13 * in[k][j+3][i-2];
outkc0jp3ic0 += 1.132 * in[k][j+3][i-2];
outkc0jp1ic0 += 2.13 * in[k][j+3][i-1];
outkc0jp2ic0 += 0.331 * in[k][j+3][i-1];
outkc0jp3ic0 += 0.75 * in[k][j+3][i-1];
outkc0jp1ic0 += 1.132 * in[k][j+3][i];
outkc0jp2ic0 += 0.75 * in[k][j+3][i];
outkc0jp3ic0 += 2.13 * in[k][j+3][i];
outkc0jp1ic0 += 2.13 * in[k][j+3][i+1];
outkc0jp2ic0 += 0.331 * in[k][j+3][i+1];
outkc0jp3ic0 += 0.75 * in[k][j+3][i+1];
outkc0jp1ic0 += 0.217 * in[k][j+3][i+2];
outkc0jp2ic0 += 2.13 * in[k][j+3][i+2];
outkc0jp3ic0 += 1.132 * in[k][j+3][i+2];
outkc0jp1ic0 += 1.132 * in[k+1][j+3][i-2];
outkc0jp2ic0 += 0.331 * in[k+1][j+3][i-2];
outkc0jp3ic0 += 2.13 * in[k+1][j+3][i-2];
outkc0jp1ic0 += 0.331 * in[k+1][j+3][i-1];
outkc0jp2ic0 += 0.75 * in[k+1][j+3][i-1];
outkc0jp3ic0 += 0.217 * in[k+1][j+3][i-1];
outkc0jp1ic0 += 2.13 * in[k+1][j+3][i];
outkc0jp2ic0 += 0.217 * in[k+1][j+3][i];
outkc0jp3ic0 += 0.331 * in[k+1][j+3][i];
outkc0jp1ic0 += 0.331 * in[k+1][j+3][i+1];
outkc0jp2ic0 += 0.75 * in[k+1][j+3][i+1];
outkc0jp3ic0 += 0.217 * in[k+1][j+3][i+1];
outkc0jp1ic0 += 1.132 * in[k+1][j+3][i+2];
outkc0jp2ic0 += 0.331 * in[k+1][j+3][i+2];
outkc0jp3ic0 += 2.13 * in[k+1][j+3][i+2];
outkc0jp1ic0 += 0.75 * in[k+2][j+3][i-2];
outkc0jp2ic0 += 1.132 * in[k+2][j+3][i-2];
outkc0jp3ic0 += 0.217 * in[k+2][j+3][i-2];
outkc0jp1ic0 += 1.132 * in[k+2][j+3][i-1];
outkc0jp2ic0 += 0.331 * in[k+2][j+3][i-1];
outkc0jp3ic0 += 2.13 * in[k+2][j+3][i-1];
outkc0jp1ic0 += 0.217 * in[k+2][j+3][i];
outkc0jp2ic0 += 2.13 * in[k+2][j+3][i];
outkc0jp3ic0 += 1.132 * in[k+2][j+3][i];
outkc0jp1ic0 += 1.132 * in[k+2][j+3][i+1];
outkc0jp2ic0 += 0.331 * in[k+2][j+3][i+1];
outkc0jp3ic0 += 2.13 * in[k+2][j+3][i+1];
outkc0jp1ic0 += 0.75 * in[k+2][j+3][i+2];
outkc0jp2ic0 += 1.132 * in[k+2][j+3][i+2];
outkc0jp3ic0 += 0.217 * in[k+2][j+3][i+2];
outkc0jp2ic0 += 0.75 * in[k-2][j+4][i-2];
outkc0jp3ic0 += 1.132 * in[k-2][j+4][i-2];
outkc0jp2ic0 += 1.132 * in[k-2][j+4][i-1];
outkc0jp3ic0 += 0.331 * in[k-2][j+4][i-1];
outkc0jp2ic0 += 0.217 * in[k-2][j+4][i];
outkc0jp3ic0 += 2.13 * in[k-2][j+4][i];
outkc0jp2ic0 += 1.132 * in[k-2][j+4][i+1];
outkc0jp3ic0 += 0.331 * in[k-2][j+4][i+1];
outkc0jp2ic0 += 0.75 * in[k-2][j+4][i+2];
outkc0jp3ic0 += 1.132 * in[k-2][j+4][i+2];
outkc0jp2ic0 += 1.132 * in[k-1][j+4][i-2];
outkc0jp3ic0 += 0.331 * in[k-1][j+4][i-2];
outkc0jp2ic0 += 0.331 * in[k-1][j+4][i-1];
outkc0jp3ic0 += 0.75 * in[k-1][j+4][i-1];
outkc0jp2ic0 += 2.13 * in[k-1][j+4][i];
outkc0jp3ic0 += 0.217 * in[k-1][j+4][i];
outkc0jp2ic0 += 0.331 * in[k-1][j+4][i+1];
outkc0jp3ic0 += 0.75 * in[k-1][j+4][i+1];
outkc0jp2ic0 += 1.132 * in[k-1][j+4][i+2];
outkc0jp3ic0 += 0.331 * in[k-1][j+4][i+2];
outkc0jp2ic0 += 0.217 * in[k][j+4][i-2];
outkc0jp3ic0 += 2.13 * in[k][j+4][i-2];
outkc0jp2ic0 += 2.13 * in[k][j+4][i-1];
outkc0jp3ic0 += 0.331 * in[k][j+4][i-1];
outkc0jp2ic0 += 1.132 * in[k][j+4][i];
outkc0jp3ic0 += 0.75 * in[k][j+4][i];
outkc0jp2ic0 += 2.13 * in[k][j+4][i+1];
outkc0jp3ic0 += 0.331 * in[k][j+4][i+1];
outkc0jp2ic0 += 0.217 * in[k][j+4][i+2];
outkc0jp3ic0 += 2.13 * in[k][j+4][i+2];
outkc0jp2ic0 += 1.132 * in[k+1][j+4][i-2];
outkc0jp3ic0 += 0.331 * in[k+1][j+4][i-2];
outkc0jp2ic0 += 0.331 * in[k+1][j+4][i-1];
outkc0jp3ic0 += 0.75 * in[k+1][j+4][i-1];
outkc0jp2ic0 += 2.13 * in[k+1][j+4][i];
outkc0jp3ic0 += 0.217 * in[k+1][j+4][i];
outkc0jp2ic0 += 0.331 * in[k+1][j+4][i+1];
outkc0jp3ic0 += 0.75 * in[k+1][j+4][i+1];
outkc0jp2ic0 += 1.132 * in[k+1][j+4][i+2];
outkc0jp3ic0 += 0.331 * in[k+1][j+4][i+2];
outkc0jp2ic0 += 0.75 * in[k+2][j+4][i-2];
outkc0jp3ic0 += 1.132 * in[k+2][j+4][i-2];
outkc0jp2ic0 += 1.132 * in[k+2][j+4][i-1];
outkc0jp3ic0 += 0.331 * in[k+2][j+4][i-1];
outkc0jp2ic0 += 0.217 * in[k+2][j+4][i];
outkc0jp3ic0 += 2.13 * in[k+2][j+4][i];
outkc0jp2ic0 += 1.132 * in[k+2][j+4][i+1];
outkc0jp3ic0 += 0.331 * in[k+2][j+4][i+1];
outkc0jp2ic0 += 0.75 * in[k+2][j+4][i+2];
outkc0jp3ic0 += 1.132 * in[k+2][j+4][i+2];
outkc0jp3ic0 += 0.75 * in[k-2][j+5][i-2];
outkc0jp3ic0 += 1.132 * in[k-2][j+5][i-1];
outkc0jp3ic0 += 0.217 * in[k-2][j+5][i];
outkc0jp3ic0 += 1.132 * in[k-2][j+5][i+1];
outkc0jp3ic0 += 0.75 * in[k-2][j+5][i+2];
outkc0jp3ic0 += 1.132 * in[k-1][j+5][i-2];
outkc0jp3ic0 += 0.331 * in[k-1][j+5][i-1];
outkc0jp3ic0 += 2.13 * in[k-1][j+5][i];
outkc0jp3ic0 += 0.331 * in[k-1][j+5][i+1];
outkc0jp3ic0 += 1.132 * in[k-1][j+5][i+2];
outkc0jp3ic0 += 0.217 * in[k][j+5][i-2];
outkc0jp3ic0 += 2.13 * in[k][j+5][i-1];
outkc0jp3ic0 += 1.132 * in[k][j+5][i];
outkc0jp3ic0 += 2.13 * in[k][j+5][i+1];
outkc0jp3ic0 += 0.217 * in[k][j+5][i+2];
outkc0jp3ic0 += 1.132 * in[k+1][j+5][i-2];
outkc0jp3ic0 += 0.331 * in[k+1][j+5][i-1];
outkc0jp3ic0 += 2.13 * in[k+1][j+5][i];
outkc0jp3ic0 += 0.331 * in[k+1][j+5][i+1];
outkc0jp3ic0 += 1.132 * in[k+1][j+5][i+2];
outkc0jp3ic0 += 0.75 * in[k+2][j+5][i-2];
outkc0jp3ic0 += 1.132 * in[k+2][j+5][i-1];
outkc0jp3ic0 += 0.217 * in[k+2][j+5][i];
outkc0jp3ic0 += 1.132 * in[k+2][j+5][i+1];
outkc0jp3ic0 += 0.75 * in[k+2][j+5][i+2];
out[k][j][i] = outkc0jc0ic0;
out[k][j+1][i] = outkc0jp1ic0;
out[k][j+2][i] = outkc0jp2ic0;
out[k][j+3][i] = outkc0jp3ic0;
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
hipMalloc (&in, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for in\n");
hipMemcpy (in, h_in, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *out;
hipMalloc (&out, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (32, 4, 4);
dim3 gridconfig (ceil(N-4, blockconfig.x), ceil(N-4, 4*blockconfig.y), ceil(N-4, blockconfig.z));
hipLaunchKernelGGL(( j3d125pt), dim3(gridconfig), dim3(blockconfig), 0, 0, in, out, N);
hipMemcpy (h_out, out, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipFree (in);
hipFree (out);
}
| 6879e4268f7c5c269cd12495592962cb061c83e3.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void j3d125pt (double * __restrict__ t_in, double * __restrict__ t_out, int N) {
//Determing the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 2;
int i = max(i0,2) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 2;
int j = max(j0,2) + 4*(int)(threadIdx.y);
int k0 = (int)(blockIdx.z)*(int)(blockDim.z) + 2;
int k = max(k0,2) + (int)(threadIdx.z);
double (*in)[516][516] = (double (*)[516][516])t_in;
double (*out)[516][516] = (double (*)[516][516])t_out;
if (i>=2 && i<=N-3 && j>=2 && j<=N-3 && k>=2 && k<=N-3) {
double outkc0jc0ic0 = 0.75 * in[k-2][j-2][i-2];
outkc0jc0ic0 += 1.132 * in[k-2][j-2][i-1];
outkc0jc0ic0 += 0.217 * in[k-2][j-2][i];
outkc0jc0ic0 += 1.132 * in[k-2][j-2][i+1];
outkc0jc0ic0 += 0.75 * in[k-2][j-2][i+2];
outkc0jc0ic0 += 1.132 * in[k-1][j-2][i-2];
outkc0jc0ic0 += 0.331 * in[k-1][j-2][i-1];
outkc0jc0ic0 += 2.13 * in[k-1][j-2][i];
outkc0jc0ic0 += 0.331 * in[k-1][j-2][i+1];
outkc0jc0ic0 += 1.132 * in[k-1][j-2][i+2];
outkc0jc0ic0 += 0.217 * in[k][j-2][i-2];
outkc0jc0ic0 += 2.13 * in[k][j-2][i-1];
outkc0jc0ic0 += 1.132 * in[k][j-2][i];
outkc0jc0ic0 += 2.13 * in[k][j-2][i+1];
outkc0jc0ic0 += 0.217 * in[k][j-2][i+2];
outkc0jc0ic0 += 1.132 * in[k+1][j-2][i-2];
outkc0jc0ic0 += 0.331 * in[k+1][j-2][i-1];
outkc0jc0ic0 += 2.13 * in[k+1][j-2][i];
outkc0jc0ic0 += 0.331 * in[k+1][j-2][i+1];
outkc0jc0ic0 += 1.132 * in[k+1][j-2][i+2];
outkc0jc0ic0 += 0.75 * in[k+2][j-2][i-2];
outkc0jc0ic0 += 1.132 * in[k+2][j-2][i-1];
outkc0jc0ic0 += 0.217 * in[k+2][j-2][i];
outkc0jc0ic0 += 1.132 * in[k+2][j-2][i+1];
outkc0jc0ic0 += 0.75 * in[k+2][j-2][i+2];
outkc0jc0ic0 += 1.132 * in[k-2][j-1][i-2];
double outkc0jp1ic0 = 0.75 * in[k-2][j-1][i-2];
outkc0jc0ic0 += 0.331 * in[k-2][j-1][i-1];
outkc0jp1ic0 += 1.132 * in[k-2][j-1][i-1];
outkc0jc0ic0 += 2.13 * in[k-2][j-1][i];
outkc0jp1ic0 += 0.217 * in[k-2][j-1][i];
outkc0jc0ic0 += 0.331 * in[k-2][j-1][i+1];
outkc0jp1ic0 += 1.132 * in[k-2][j-1][i+1];
outkc0jc0ic0 += 1.132 * in[k-2][j-1][i+2];
outkc0jp1ic0 += 0.75 * in[k-2][j-1][i+2];
outkc0jc0ic0 += 0.217 * in[k-2][j][i-2];
outkc0jp1ic0 += 1.132 * in[k-2][j][i-2];
double outkc0jp2ic0 = 0.75 * in[k-2][j][i-2];
outkc0jc0ic0 += 2.13 * in[k-2][j][i-1];
outkc0jp1ic0 += 0.331 * in[k-2][j][i-1];
outkc0jp2ic0 += 1.132 * in[k-2][j][i-1];
outkc0jc0ic0 += 1.132 * in[k-2][j][i];
outkc0jp1ic0 += 2.13 * in[k-2][j][i];
outkc0jp2ic0 += 0.217 * in[k-2][j][i];
outkc0jc0ic0 += 2.13 * in[k-2][j][i+1];
outkc0jp1ic0 += 0.331 * in[k-2][j][i+1];
outkc0jp2ic0 += 1.132 * in[k-2][j][i+1];
outkc0jc0ic0 += 0.217 * in[k-2][j][i+2];
outkc0jp1ic0 += 1.132 * in[k-2][j][i+2];
outkc0jp2ic0 += 0.75 * in[k-2][j][i+2];
outkc0jc0ic0 += 1.132 * in[k-2][j+1][i-2];
outkc0jp1ic0 += 0.217 * in[k-2][j+1][i-2];
outkc0jp2ic0 += 1.132 * in[k-2][j+1][i-2];
double outkc0jp3ic0 = 0.75 * in[k-2][j+1][i-2];
outkc0jc0ic0 += 0.331 * in[k-2][j+1][i-1];
outkc0jp1ic0 += 2.13 * in[k-2][j+1][i-1];
outkc0jp2ic0 += 0.331 * in[k-2][j+1][i-1];
outkc0jp3ic0 += 1.132 * in[k-2][j+1][i-1];
outkc0jc0ic0 += 2.13 * in[k-2][j+1][i];
outkc0jp1ic0 += 1.132 * in[k-2][j+1][i];
outkc0jp2ic0 += 2.13 * in[k-2][j+1][i];
outkc0jp3ic0 += 0.217 * in[k-2][j+1][i];
outkc0jc0ic0 += 0.331 * in[k-2][j+1][i+1];
outkc0jp1ic0 += 2.13 * in[k-2][j+1][i+1];
outkc0jp2ic0 += 0.331 * in[k-2][j+1][i+1];
outkc0jp3ic0 += 1.132 * in[k-2][j+1][i+1];
outkc0jc0ic0 += 1.132 * in[k-2][j+1][i+2];
outkc0jp1ic0 += 0.217 * in[k-2][j+1][i+2];
outkc0jp2ic0 += 1.132 * in[k-2][j+1][i+2];
outkc0jp3ic0 += 0.75 * in[k-2][j+1][i+2];
outkc0jc0ic0 += 0.75 * in[k-2][j+2][i-2];
outkc0jp1ic0 += 1.132 * in[k-2][j+2][i-2];
outkc0jp2ic0 += 0.217 * in[k-2][j+2][i-2];
outkc0jp3ic0 += 1.132 * in[k-2][j+2][i-2];
outkc0jc0ic0 += 1.132 * in[k-2][j+2][i-1];
outkc0jp1ic0 += 0.331 * in[k-2][j+2][i-1];
outkc0jp2ic0 += 2.13 * in[k-2][j+2][i-1];
outkc0jp3ic0 += 0.331 * in[k-2][j+2][i-1];
outkc0jc0ic0 += 0.217 * in[k-2][j+2][i];
outkc0jp1ic0 += 2.13 * in[k-2][j+2][i];
outkc0jp2ic0 += 1.132 * in[k-2][j+2][i];
outkc0jp3ic0 += 2.13 * in[k-2][j+2][i];
outkc0jc0ic0 += 1.132 * in[k-2][j+2][i+1];
outkc0jp1ic0 += 0.331 * in[k-2][j+2][i+1];
outkc0jp2ic0 += 2.13 * in[k-2][j+2][i+1];
outkc0jp3ic0 += 0.331 * in[k-2][j+2][i+1];
outkc0jc0ic0 += 0.75 * in[k-2][j+2][i+2];
outkc0jp1ic0 += 1.132 * in[k-2][j+2][i+2];
outkc0jp2ic0 += 0.217 * in[k-2][j+2][i+2];
outkc0jp3ic0 += 1.132 * in[k-2][j+2][i+2];
outkc0jc0ic0 += 0.331 * in[k-1][j-1][i-2];
outkc0jp1ic0 += 1.132 * in[k-1][j-1][i-2];
outkc0jc0ic0 += 0.75 * in[k-1][j-1][i-1];
outkc0jp1ic0 += 0.331 * in[k-1][j-1][i-1];
outkc0jc0ic0 += 0.217 * in[k-1][j-1][i];
outkc0jp1ic0 += 2.13 * in[k-1][j-1][i];
outkc0jc0ic0 += 0.75 * in[k-1][j-1][i+1];
outkc0jp1ic0 += 0.331 * in[k-1][j-1][i+1];
outkc0jc0ic0 += 0.331 * in[k-1][j-1][i+2];
outkc0jp1ic0 += 1.132 * in[k-1][j-1][i+2];
outkc0jc0ic0 += 2.13 * in[k-1][j][i-2];
outkc0jp1ic0 += 0.331 * in[k-1][j][i-2];
outkc0jp2ic0 += 1.132 * in[k-1][j][i-2];
outkc0jc0ic0 += 0.217 * in[k-1][j][i-1];
outkc0jp1ic0 += 0.75 * in[k-1][j][i-1];
outkc0jp2ic0 += 0.331 * in[k-1][j][i-1];
outkc0jc0ic0 += 0.331 * in[k-1][j][i];
outkc0jp1ic0 += 0.217 * in[k-1][j][i];
outkc0jp2ic0 += 2.13 * in[k-1][j][i];
outkc0jc0ic0 += 0.217 * in[k-1][j][i+1];
outkc0jp1ic0 += 0.75 * in[k-1][j][i+1];
outkc0jp2ic0 += 0.331 * in[k-1][j][i+1];
outkc0jc0ic0 += 2.13 * in[k-1][j][i+2];
outkc0jp1ic0 += 0.331 * in[k-1][j][i+2];
outkc0jp2ic0 += 1.132 * in[k-1][j][i+2];
outkc0jc0ic0 += 0.331 * in[k-1][j+1][i-2];
outkc0jp1ic0 += 2.13 * in[k-1][j+1][i-2];
outkc0jp2ic0 += 0.331 * in[k-1][j+1][i-2];
outkc0jp3ic0 += 1.132 * in[k-1][j+1][i-2];
outkc0jc0ic0 += 0.75 * in[k-1][j+1][i-1];
outkc0jp1ic0 += 0.217 * in[k-1][j+1][i-1];
outkc0jp2ic0 += 0.75 * in[k-1][j+1][i-1];
outkc0jp3ic0 += 0.331 * in[k-1][j+1][i-1];
outkc0jc0ic0 += 0.217 * in[k-1][j+1][i];
outkc0jp1ic0 += 0.331 * in[k-1][j+1][i];
outkc0jp2ic0 += 0.217 * in[k-1][j+1][i];
outkc0jp3ic0 += 2.13 * in[k-1][j+1][i];
outkc0jc0ic0 += 0.75 * in[k-1][j+1][i+1];
outkc0jp1ic0 += 0.217 * in[k-1][j+1][i+1];
outkc0jp2ic0 += 0.75 * in[k-1][j+1][i+1];
outkc0jp3ic0 += 0.331 * in[k-1][j+1][i+1];
outkc0jc0ic0 += 0.331 * in[k-1][j+1][i+2];
outkc0jp1ic0 += 2.13 * in[k-1][j+1][i+2];
outkc0jp2ic0 += 0.331 * in[k-1][j+1][i+2];
outkc0jp3ic0 += 1.132 * in[k-1][j+1][i+2];
outkc0jc0ic0 += 1.132 * in[k-1][j+2][i-2];
outkc0jp1ic0 += 0.331 * in[k-1][j+2][i-2];
outkc0jp2ic0 += 2.13 * in[k-1][j+2][i-2];
outkc0jp3ic0 += 0.331 * in[k-1][j+2][i-2];
outkc0jc0ic0 += 0.331 * in[k-1][j+2][i-1];
outkc0jp1ic0 += 0.75 * in[k-1][j+2][i-1];
outkc0jp2ic0 += 0.217 * in[k-1][j+2][i-1];
outkc0jp3ic0 += 0.75 * in[k-1][j+2][i-1];
outkc0jc0ic0 += 2.13 * in[k-1][j+2][i];
outkc0jp1ic0 += 0.217 * in[k-1][j+2][i];
outkc0jp2ic0 += 0.331 * in[k-1][j+2][i];
outkc0jp3ic0 += 0.217 * in[k-1][j+2][i];
outkc0jc0ic0 += 0.331 * in[k-1][j+2][i+1];
outkc0jp1ic0 += 0.75 * in[k-1][j+2][i+1];
outkc0jp2ic0 += 0.217 * in[k-1][j+2][i+1];
outkc0jp3ic0 += 0.75 * in[k-1][j+2][i+1];
outkc0jc0ic0 += 1.132 * in[k-1][j+2][i+2];
outkc0jp1ic0 += 0.331 * in[k-1][j+2][i+2];
outkc0jp2ic0 += 2.13 * in[k-1][j+2][i+2];
outkc0jp3ic0 += 0.331 * in[k-1][j+2][i+2];
outkc0jc0ic0 += 2.13 * in[k][j-1][i-2];
outkc0jp1ic0 += 0.217 * in[k][j-1][i-2];
outkc0jc0ic0 += 0.331 * in[k][j-1][i-1];
outkc0jp1ic0 += 2.13 * in[k][j-1][i-1];
outkc0jc0ic0 += 0.75 * in[k][j-1][i];
outkc0jp1ic0 += 1.132 * in[k][j-1][i];
outkc0jc0ic0 += 0.331 * in[k][j-1][i+1];
outkc0jp1ic0 += 2.13 * in[k][j-1][i+1];
outkc0jc0ic0 += 2.13 * in[k][j-1][i+2];
outkc0jp1ic0 += 0.217 * in[k][j-1][i+2];
outkc0jc0ic0 += 1.132 * in[k][j][i-2];
outkc0jp1ic0 += 2.13 * in[k][j][i-2];
outkc0jp2ic0 += 0.217 * in[k][j][i-2];
outkc0jc0ic0 += 0.75 * in[k][j][i-1];
outkc0jp1ic0 += 0.331 * in[k][j][i-1];
outkc0jp2ic0 += 2.13 * in[k][j][i-1];
outkc0jc0ic0 += 2.13 * in[k][j][i];
outkc0jp1ic0 += 0.75 * in[k][j][i];
outkc0jp2ic0 += 1.132 * in[k][j][i];
outkc0jc0ic0 += 0.75 * in[k][j][i+1];
outkc0jp1ic0 += 0.331 * in[k][j][i+1];
outkc0jp2ic0 += 2.13 * in[k][j][i+1];
outkc0jc0ic0 += 1.132 * in[k][j][i+2];
outkc0jp1ic0 += 2.13 * in[k][j][i+2];
outkc0jp2ic0 += 0.217 * in[k][j][i+2];
outkc0jc0ic0 += 2.13 * in[k][j+1][i-2];
outkc0jp1ic0 += 1.132 * in[k][j+1][i-2];
outkc0jp2ic0 += 2.13 * in[k][j+1][i-2];
outkc0jp3ic0 += 0.217 * in[k][j+1][i-2];
outkc0jc0ic0 += 0.331 * in[k][j+1][i-1];
outkc0jp1ic0 += 0.75 * in[k][j+1][i-1];
outkc0jp2ic0 += 0.331 * in[k][j+1][i-1];
outkc0jp3ic0 += 2.13 * in[k][j+1][i-1];
outkc0jc0ic0 += 0.75 * in[k][j+1][i];
outkc0jp1ic0 += 2.13 * in[k][j+1][i];
outkc0jp2ic0 += 0.75 * in[k][j+1][i];
outkc0jp3ic0 += 1.132 * in[k][j+1][i];
outkc0jc0ic0 += 0.331 * in[k][j+1][i+1];
outkc0jp1ic0 += 0.75 * in[k][j+1][i+1];
outkc0jp2ic0 += 0.331 * in[k][j+1][i+1];
outkc0jp3ic0 += 2.13 * in[k][j+1][i+1];
outkc0jc0ic0 += 2.13 * in[k][j+1][i+2];
outkc0jp1ic0 += 1.132 * in[k][j+1][i+2];
outkc0jp2ic0 += 2.13 * in[k][j+1][i+2];
outkc0jp3ic0 += 0.217 * in[k][j+1][i+2];
outkc0jc0ic0 += 0.217 * in[k][j+2][i-2];
outkc0jp1ic0 += 2.13 * in[k][j+2][i-2];
outkc0jp2ic0 += 1.132 * in[k][j+2][i-2];
outkc0jp3ic0 += 2.13 * in[k][j+2][i-2];
outkc0jc0ic0 += 2.13 * in[k][j+2][i-1];
outkc0jp1ic0 += 0.331 * in[k][j+2][i-1];
outkc0jp2ic0 += 0.75 * in[k][j+2][i-1];
outkc0jp3ic0 += 0.331 * in[k][j+2][i-1];
outkc0jc0ic0 += 1.132 * in[k][j+2][i];
outkc0jp1ic0 += 0.75 * in[k][j+2][i];
outkc0jp2ic0 += 2.13 * in[k][j+2][i];
outkc0jp3ic0 += 0.75 * in[k][j+2][i];
outkc0jc0ic0 += 2.13 * in[k][j+2][i+1];
outkc0jp1ic0 += 0.331 * in[k][j+2][i+1];
outkc0jp2ic0 += 0.75 * in[k][j+2][i+1];
outkc0jp3ic0 += 0.331 * in[k][j+2][i+1];
outkc0jc0ic0 += 0.217 * in[k][j+2][i+2];
outkc0jp1ic0 += 2.13 * in[k][j+2][i+2];
outkc0jp2ic0 += 1.132 * in[k][j+2][i+2];
outkc0jp3ic0 += 2.13 * in[k][j+2][i+2];
outkc0jc0ic0 += 0.331 * in[k+1][j-1][i-2];
outkc0jp1ic0 += 1.132 * in[k+1][j-1][i-2];
outkc0jc0ic0 += 0.75 * in[k+1][j-1][i-1];
outkc0jp1ic0 += 0.331 * in[k+1][j-1][i-1];
outkc0jc0ic0 += 0.217 * in[k+1][j-1][i];
outkc0jp1ic0 += 2.13 * in[k+1][j-1][i];
outkc0jc0ic0 += 0.75 * in[k+1][j-1][i+1];
outkc0jp1ic0 += 0.331 * in[k+1][j-1][i+1];
outkc0jc0ic0 += 0.331 * in[k+1][j-1][i+2];
outkc0jp1ic0 += 1.132 * in[k+1][j-1][i+2];
outkc0jc0ic0 += 2.13 * in[k+1][j][i-2];
outkc0jp1ic0 += 0.331 * in[k+1][j][i-2];
outkc0jp2ic0 += 1.132 * in[k+1][j][i-2];
outkc0jc0ic0 += 0.217 * in[k+1][j][i-1];
outkc0jp1ic0 += 0.75 * in[k+1][j][i-1];
outkc0jp2ic0 += 0.331 * in[k+1][j][i-1];
outkc0jc0ic0 += 0.331 * in[k+1][j][i];
outkc0jp1ic0 += 0.217 * in[k+1][j][i];
outkc0jp2ic0 += 2.13 * in[k+1][j][i];
outkc0jc0ic0 += 0.217 * in[k+1][j][i+1];
outkc0jp1ic0 += 0.75 * in[k+1][j][i+1];
outkc0jp2ic0 += 0.331 * in[k+1][j][i+1];
outkc0jc0ic0 += 2.13 * in[k+1][j][i+2];
outkc0jp1ic0 += 0.331 * in[k+1][j][i+2];
outkc0jp2ic0 += 1.132 * in[k+1][j][i+2];
outkc0jc0ic0 += 0.331 * in[k+1][j+1][i-2];
outkc0jp1ic0 += 2.13 * in[k+1][j+1][i-2];
outkc0jp2ic0 += 0.331 * in[k+1][j+1][i-2];
outkc0jp3ic0 += 1.132 * in[k+1][j+1][i-2];
outkc0jc0ic0 += 0.75 * in[k+1][j+1][i-1];
outkc0jp1ic0 += 0.217 * in[k+1][j+1][i-1];
outkc0jp2ic0 += 0.75 * in[k+1][j+1][i-1];
outkc0jp3ic0 += 0.331 * in[k+1][j+1][i-1];
outkc0jc0ic0 += 0.217 * in[k+1][j+1][i];
outkc0jp1ic0 += 0.331 * in[k+1][j+1][i];
outkc0jp2ic0 += 0.217 * in[k+1][j+1][i];
outkc0jp3ic0 += 2.13 * in[k+1][j+1][i];
outkc0jc0ic0 += 0.75 * in[k+1][j+1][i+1];
outkc0jp1ic0 += 0.217 * in[k+1][j+1][i+1];
outkc0jp2ic0 += 0.75 * in[k+1][j+1][i+1];
outkc0jp3ic0 += 0.331 * in[k+1][j+1][i+1];
outkc0jc0ic0 += 0.331 * in[k+1][j+1][i+2];
outkc0jp1ic0 += 2.13 * in[k+1][j+1][i+2];
outkc0jp2ic0 += 0.331 * in[k+1][j+1][i+2];
outkc0jp3ic0 += 1.132 * in[k+1][j+1][i+2];
outkc0jc0ic0 += 1.132 * in[k+1][j+2][i-2];
outkc0jp1ic0 += 0.331 * in[k+1][j+2][i-2];
outkc0jp2ic0 += 2.13 * in[k+1][j+2][i-2];
outkc0jp3ic0 += 0.331 * in[k+1][j+2][i-2];
outkc0jc0ic0 += 0.331 * in[k+1][j+2][i-1];
outkc0jp1ic0 += 0.75 * in[k+1][j+2][i-1];
outkc0jp2ic0 += 0.217 * in[k+1][j+2][i-1];
outkc0jp3ic0 += 0.75 * in[k+1][j+2][i-1];
outkc0jc0ic0 += 2.13 * in[k+1][j+2][i];
outkc0jp1ic0 += 0.217 * in[k+1][j+2][i];
outkc0jp2ic0 += 0.331 * in[k+1][j+2][i];
outkc0jp3ic0 += 0.217 * in[k+1][j+2][i];
outkc0jc0ic0 += 0.331 * in[k+1][j+2][i+1];
outkc0jp1ic0 += 0.75 * in[k+1][j+2][i+1];
outkc0jp2ic0 += 0.217 * in[k+1][j+2][i+1];
outkc0jp3ic0 += 0.75 * in[k+1][j+2][i+1];
outkc0jc0ic0 += 1.132 * in[k+1][j+2][i+2];
outkc0jp1ic0 += 0.331 * in[k+1][j+2][i+2];
outkc0jp2ic0 += 2.13 * in[k+1][j+2][i+2];
outkc0jp3ic0 += 0.331 * in[k+1][j+2][i+2];
outkc0jc0ic0 += 1.132 * in[k+2][j-1][i-2];
outkc0jp1ic0 += 0.75 * in[k+2][j-1][i-2];
outkc0jc0ic0 += 0.331 * in[k+2][j-1][i-1];
outkc0jp1ic0 += 1.132 * in[k+2][j-1][i-1];
outkc0jc0ic0 += 2.13 * in[k+2][j-1][i];
outkc0jp1ic0 += 0.217 * in[k+2][j-1][i];
outkc0jc0ic0 += 0.331 * in[k+2][j-1][i+1];
outkc0jp1ic0 += 1.132 * in[k+2][j-1][i+1];
outkc0jc0ic0 += 1.132 * in[k+2][j-1][i+2];
outkc0jp1ic0 += 0.75 * in[k+2][j-1][i+2];
outkc0jc0ic0 += 0.217 * in[k+2][j][i-2];
outkc0jp1ic0 += 1.132 * in[k+2][j][i-2];
outkc0jp2ic0 += 0.75 * in[k+2][j][i-2];
outkc0jc0ic0 += 2.13 * in[k+2][j][i-1];
outkc0jp1ic0 += 0.331 * in[k+2][j][i-1];
outkc0jp2ic0 += 1.132 * in[k+2][j][i-1];
outkc0jc0ic0 += 1.132 * in[k+2][j][i];
outkc0jp1ic0 += 2.13 * in[k+2][j][i];
outkc0jp2ic0 += 0.217 * in[k+2][j][i];
outkc0jc0ic0 += 2.13 * in[k+2][j][i+1];
outkc0jp1ic0 += 0.331 * in[k+2][j][i+1];
outkc0jp2ic0 += 1.132 * in[k+2][j][i+1];
outkc0jc0ic0 += 0.217 * in[k+2][j][i+2];
outkc0jp1ic0 += 1.132 * in[k+2][j][i+2];
outkc0jp2ic0 += 0.75 * in[k+2][j][i+2];
outkc0jc0ic0 += 1.132 * in[k+2][j+1][i-2];
outkc0jp1ic0 += 0.217 * in[k+2][j+1][i-2];
outkc0jp2ic0 += 1.132 * in[k+2][j+1][i-2];
outkc0jp3ic0 += 0.75 * in[k+2][j+1][i-2];
outkc0jc0ic0 += 0.331 * in[k+2][j+1][i-1];
outkc0jp1ic0 += 2.13 * in[k+2][j+1][i-1];
outkc0jp2ic0 += 0.331 * in[k+2][j+1][i-1];
outkc0jp3ic0 += 1.132 * in[k+2][j+1][i-1];
outkc0jc0ic0 += 2.13 * in[k+2][j+1][i];
outkc0jp1ic0 += 1.132 * in[k+2][j+1][i];
outkc0jp2ic0 += 2.13 * in[k+2][j+1][i];
outkc0jp3ic0 += 0.217 * in[k+2][j+1][i];
outkc0jc0ic0 += 0.331 * in[k+2][j+1][i+1];
outkc0jp1ic0 += 2.13 * in[k+2][j+1][i+1];
outkc0jp2ic0 += 0.331 * in[k+2][j+1][i+1];
outkc0jp3ic0 += 1.132 * in[k+2][j+1][i+1];
outkc0jc0ic0 += 1.132 * in[k+2][j+1][i+2];
outkc0jp1ic0 += 0.217 * in[k+2][j+1][i+2];
outkc0jp2ic0 += 1.132 * in[k+2][j+1][i+2];
outkc0jp3ic0 += 0.75 * in[k+2][j+1][i+2];
outkc0jc0ic0 += 0.75 * in[k+2][j+2][i-2];
outkc0jp1ic0 += 1.132 * in[k+2][j+2][i-2];
outkc0jp2ic0 += 0.217 * in[k+2][j+2][i-2];
outkc0jp3ic0 += 1.132 * in[k+2][j+2][i-2];
outkc0jc0ic0 += 1.132 * in[k+2][j+2][i-1];
outkc0jp1ic0 += 0.331 * in[k+2][j+2][i-1];
outkc0jp2ic0 += 2.13 * in[k+2][j+2][i-1];
outkc0jp3ic0 += 0.331 * in[k+2][j+2][i-1];
outkc0jc0ic0 += 0.217 * in[k+2][j+2][i];
outkc0jp1ic0 += 2.13 * in[k+2][j+2][i];
outkc0jp2ic0 += 1.132 * in[k+2][j+2][i];
outkc0jp3ic0 += 2.13 * in[k+2][j+2][i];
outkc0jc0ic0 += 1.132 * in[k+2][j+2][i+1];
outkc0jp1ic0 += 0.331 * in[k+2][j+2][i+1];
outkc0jp2ic0 += 2.13 * in[k+2][j+2][i+1];
outkc0jp3ic0 += 0.331 * in[k+2][j+2][i+1];
outkc0jc0ic0 += 0.75 * in[k+2][j+2][i+2];
outkc0jp1ic0 += 1.132 * in[k+2][j+2][i+2];
outkc0jp2ic0 += 0.217 * in[k+2][j+2][i+2];
outkc0jp3ic0 += 1.132 * in[k+2][j+2][i+2];
outkc0jp1ic0 += 0.75 * in[k-2][j+3][i-2];
outkc0jp2ic0 += 1.132 * in[k-2][j+3][i-2];
outkc0jp3ic0 += 0.217 * in[k-2][j+3][i-2];
outkc0jp1ic0 += 1.132 * in[k-2][j+3][i-1];
outkc0jp2ic0 += 0.331 * in[k-2][j+3][i-1];
outkc0jp3ic0 += 2.13 * in[k-2][j+3][i-1];
outkc0jp1ic0 += 0.217 * in[k-2][j+3][i];
outkc0jp2ic0 += 2.13 * in[k-2][j+3][i];
outkc0jp3ic0 += 1.132 * in[k-2][j+3][i];
outkc0jp1ic0 += 1.132 * in[k-2][j+3][i+1];
outkc0jp2ic0 += 0.331 * in[k-2][j+3][i+1];
outkc0jp3ic0 += 2.13 * in[k-2][j+3][i+1];
outkc0jp1ic0 += 0.75 * in[k-2][j+3][i+2];
outkc0jp2ic0 += 1.132 * in[k-2][j+3][i+2];
outkc0jp3ic0 += 0.217 * in[k-2][j+3][i+2];
outkc0jp1ic0 += 1.132 * in[k-1][j+3][i-2];
outkc0jp2ic0 += 0.331 * in[k-1][j+3][i-2];
outkc0jp3ic0 += 2.13 * in[k-1][j+3][i-2];
outkc0jp1ic0 += 0.331 * in[k-1][j+3][i-1];
outkc0jp2ic0 += 0.75 * in[k-1][j+3][i-1];
outkc0jp3ic0 += 0.217 * in[k-1][j+3][i-1];
outkc0jp1ic0 += 2.13 * in[k-1][j+3][i];
outkc0jp2ic0 += 0.217 * in[k-1][j+3][i];
outkc0jp3ic0 += 0.331 * in[k-1][j+3][i];
outkc0jp1ic0 += 0.331 * in[k-1][j+3][i+1];
outkc0jp2ic0 += 0.75 * in[k-1][j+3][i+1];
outkc0jp3ic0 += 0.217 * in[k-1][j+3][i+1];
outkc0jp1ic0 += 1.132 * in[k-1][j+3][i+2];
outkc0jp2ic0 += 0.331 * in[k-1][j+3][i+2];
outkc0jp3ic0 += 2.13 * in[k-1][j+3][i+2];
outkc0jp1ic0 += 0.217 * in[k][j+3][i-2];
outkc0jp2ic0 += 2.13 * in[k][j+3][i-2];
outkc0jp3ic0 += 1.132 * in[k][j+3][i-2];
outkc0jp1ic0 += 2.13 * in[k][j+3][i-1];
outkc0jp2ic0 += 0.331 * in[k][j+3][i-1];
outkc0jp3ic0 += 0.75 * in[k][j+3][i-1];
outkc0jp1ic0 += 1.132 * in[k][j+3][i];
outkc0jp2ic0 += 0.75 * in[k][j+3][i];
outkc0jp3ic0 += 2.13 * in[k][j+3][i];
outkc0jp1ic0 += 2.13 * in[k][j+3][i+1];
outkc0jp2ic0 += 0.331 * in[k][j+3][i+1];
outkc0jp3ic0 += 0.75 * in[k][j+3][i+1];
outkc0jp1ic0 += 0.217 * in[k][j+3][i+2];
outkc0jp2ic0 += 2.13 * in[k][j+3][i+2];
outkc0jp3ic0 += 1.132 * in[k][j+3][i+2];
outkc0jp1ic0 += 1.132 * in[k+1][j+3][i-2];
outkc0jp2ic0 += 0.331 * in[k+1][j+3][i-2];
outkc0jp3ic0 += 2.13 * in[k+1][j+3][i-2];
outkc0jp1ic0 += 0.331 * in[k+1][j+3][i-1];
outkc0jp2ic0 += 0.75 * in[k+1][j+3][i-1];
outkc0jp3ic0 += 0.217 * in[k+1][j+3][i-1];
outkc0jp1ic0 += 2.13 * in[k+1][j+3][i];
outkc0jp2ic0 += 0.217 * in[k+1][j+3][i];
outkc0jp3ic0 += 0.331 * in[k+1][j+3][i];
outkc0jp1ic0 += 0.331 * in[k+1][j+3][i+1];
outkc0jp2ic0 += 0.75 * in[k+1][j+3][i+1];
outkc0jp3ic0 += 0.217 * in[k+1][j+3][i+1];
outkc0jp1ic0 += 1.132 * in[k+1][j+3][i+2];
outkc0jp2ic0 += 0.331 * in[k+1][j+3][i+2];
outkc0jp3ic0 += 2.13 * in[k+1][j+3][i+2];
outkc0jp1ic0 += 0.75 * in[k+2][j+3][i-2];
outkc0jp2ic0 += 1.132 * in[k+2][j+3][i-2];
outkc0jp3ic0 += 0.217 * in[k+2][j+3][i-2];
outkc0jp1ic0 += 1.132 * in[k+2][j+3][i-1];
outkc0jp2ic0 += 0.331 * in[k+2][j+3][i-1];
outkc0jp3ic0 += 2.13 * in[k+2][j+3][i-1];
outkc0jp1ic0 += 0.217 * in[k+2][j+3][i];
outkc0jp2ic0 += 2.13 * in[k+2][j+3][i];
outkc0jp3ic0 += 1.132 * in[k+2][j+3][i];
outkc0jp1ic0 += 1.132 * in[k+2][j+3][i+1];
outkc0jp2ic0 += 0.331 * in[k+2][j+3][i+1];
outkc0jp3ic0 += 2.13 * in[k+2][j+3][i+1];
outkc0jp1ic0 += 0.75 * in[k+2][j+3][i+2];
outkc0jp2ic0 += 1.132 * in[k+2][j+3][i+2];
outkc0jp3ic0 += 0.217 * in[k+2][j+3][i+2];
outkc0jp2ic0 += 0.75 * in[k-2][j+4][i-2];
outkc0jp3ic0 += 1.132 * in[k-2][j+4][i-2];
outkc0jp2ic0 += 1.132 * in[k-2][j+4][i-1];
outkc0jp3ic0 += 0.331 * in[k-2][j+4][i-1];
outkc0jp2ic0 += 0.217 * in[k-2][j+4][i];
outkc0jp3ic0 += 2.13 * in[k-2][j+4][i];
outkc0jp2ic0 += 1.132 * in[k-2][j+4][i+1];
outkc0jp3ic0 += 0.331 * in[k-2][j+4][i+1];
outkc0jp2ic0 += 0.75 * in[k-2][j+4][i+2];
outkc0jp3ic0 += 1.132 * in[k-2][j+4][i+2];
outkc0jp2ic0 += 1.132 * in[k-1][j+4][i-2];
outkc0jp3ic0 += 0.331 * in[k-1][j+4][i-2];
outkc0jp2ic0 += 0.331 * in[k-1][j+4][i-1];
outkc0jp3ic0 += 0.75 * in[k-1][j+4][i-1];
outkc0jp2ic0 += 2.13 * in[k-1][j+4][i];
outkc0jp3ic0 += 0.217 * in[k-1][j+4][i];
outkc0jp2ic0 += 0.331 * in[k-1][j+4][i+1];
outkc0jp3ic0 += 0.75 * in[k-1][j+4][i+1];
outkc0jp2ic0 += 1.132 * in[k-1][j+4][i+2];
outkc0jp3ic0 += 0.331 * in[k-1][j+4][i+2];
outkc0jp2ic0 += 0.217 * in[k][j+4][i-2];
outkc0jp3ic0 += 2.13 * in[k][j+4][i-2];
outkc0jp2ic0 += 2.13 * in[k][j+4][i-1];
outkc0jp3ic0 += 0.331 * in[k][j+4][i-1];
outkc0jp2ic0 += 1.132 * in[k][j+4][i];
outkc0jp3ic0 += 0.75 * in[k][j+4][i];
outkc0jp2ic0 += 2.13 * in[k][j+4][i+1];
outkc0jp3ic0 += 0.331 * in[k][j+4][i+1];
outkc0jp2ic0 += 0.217 * in[k][j+4][i+2];
outkc0jp3ic0 += 2.13 * in[k][j+4][i+2];
outkc0jp2ic0 += 1.132 * in[k+1][j+4][i-2];
outkc0jp3ic0 += 0.331 * in[k+1][j+4][i-2];
outkc0jp2ic0 += 0.331 * in[k+1][j+4][i-1];
outkc0jp3ic0 += 0.75 * in[k+1][j+4][i-1];
outkc0jp2ic0 += 2.13 * in[k+1][j+4][i];
outkc0jp3ic0 += 0.217 * in[k+1][j+4][i];
outkc0jp2ic0 += 0.331 * in[k+1][j+4][i+1];
outkc0jp3ic0 += 0.75 * in[k+1][j+4][i+1];
outkc0jp2ic0 += 1.132 * in[k+1][j+4][i+2];
outkc0jp3ic0 += 0.331 * in[k+1][j+4][i+2];
outkc0jp2ic0 += 0.75 * in[k+2][j+4][i-2];
outkc0jp3ic0 += 1.132 * in[k+2][j+4][i-2];
outkc0jp2ic0 += 1.132 * in[k+2][j+4][i-1];
outkc0jp3ic0 += 0.331 * in[k+2][j+4][i-1];
outkc0jp2ic0 += 0.217 * in[k+2][j+4][i];
outkc0jp3ic0 += 2.13 * in[k+2][j+4][i];
outkc0jp2ic0 += 1.132 * in[k+2][j+4][i+1];
outkc0jp3ic0 += 0.331 * in[k+2][j+4][i+1];
outkc0jp2ic0 += 0.75 * in[k+2][j+4][i+2];
outkc0jp3ic0 += 1.132 * in[k+2][j+4][i+2];
outkc0jp3ic0 += 0.75 * in[k-2][j+5][i-2];
outkc0jp3ic0 += 1.132 * in[k-2][j+5][i-1];
outkc0jp3ic0 += 0.217 * in[k-2][j+5][i];
outkc0jp3ic0 += 1.132 * in[k-2][j+5][i+1];
outkc0jp3ic0 += 0.75 * in[k-2][j+5][i+2];
outkc0jp3ic0 += 1.132 * in[k-1][j+5][i-2];
outkc0jp3ic0 += 0.331 * in[k-1][j+5][i-1];
outkc0jp3ic0 += 2.13 * in[k-1][j+5][i];
outkc0jp3ic0 += 0.331 * in[k-1][j+5][i+1];
outkc0jp3ic0 += 1.132 * in[k-1][j+5][i+2];
outkc0jp3ic0 += 0.217 * in[k][j+5][i-2];
outkc0jp3ic0 += 2.13 * in[k][j+5][i-1];
outkc0jp3ic0 += 1.132 * in[k][j+5][i];
outkc0jp3ic0 += 2.13 * in[k][j+5][i+1];
outkc0jp3ic0 += 0.217 * in[k][j+5][i+2];
outkc0jp3ic0 += 1.132 * in[k+1][j+5][i-2];
outkc0jp3ic0 += 0.331 * in[k+1][j+5][i-1];
outkc0jp3ic0 += 2.13 * in[k+1][j+5][i];
outkc0jp3ic0 += 0.331 * in[k+1][j+5][i+1];
outkc0jp3ic0 += 1.132 * in[k+1][j+5][i+2];
outkc0jp3ic0 += 0.75 * in[k+2][j+5][i-2];
outkc0jp3ic0 += 1.132 * in[k+2][j+5][i-1];
outkc0jp3ic0 += 0.217 * in[k+2][j+5][i];
outkc0jp3ic0 += 1.132 * in[k+2][j+5][i+1];
outkc0jp3ic0 += 0.75 * in[k+2][j+5][i+2];
out[k][j][i] = outkc0jc0ic0;
out[k][j+1][i] = outkc0jp1ic0;
out[k][j+2][i] = outkc0jp2ic0;
out[k][j+3][i] = outkc0jp3ic0;
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
cudaMalloc (&in, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for in\n");
cudaMemcpy (in, h_in, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *out;
cudaMalloc (&out, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (32, 4, 4);
dim3 gridconfig (ceil(N-4, blockconfig.x), ceil(N-4, 4*blockconfig.y), ceil(N-4, blockconfig.z));
j3d125pt<<<gridconfig, blockconfig>>> (in, out, N);
cudaMemcpy (h_out, out, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaFree (in);
cudaFree (out);
}
|
1f865badfd92a9ef8f47096c192761e8e706d7f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <deviceFunctions.h>
int main()
{
printDeviceInfo();
}
| 1f865badfd92a9ef8f47096c192761e8e706d7f2.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <deviceFunctions.h>
int main()
{
printDeviceInfo();
}
|
511bee0b1a0d6289f204fc25d43ca804f4f75bdb.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include <stdio.h>
#include <stdlib.h>
#include <windows.h>
#include <iostream>
#include <fstream>
////////////////////////////////////////////////////
//GPU
#define HostToDevice hipMemcpyHostToDevice
#define DeviceToHost hipMemcpyDeviceToHost
#define OK hipSuccess
#define NEW_LINE '\n'
#define SIZE 5
//CPU FUNCTIONS
void Show_Matrix(const __int32* const* const Matrix);
void Fill_Matrix(__int32* const* const Matrix, const __int32 initial_value);
void Fill_Matrix(__int32* const* const Matrix, const _STD string & file_path);
//GPU FUNCTIONS
__global__ void Show_Matrix_GPU(const __int32* const Matrix);
__global__ void Multiply_Matrices(const __int32* const Matrix_GPU_A, const __int32* const Matrix_GPU_B, __int32* const Matrix_GPU_C);
int main(int argc, char* argv[])
{
/*
ALLOCATING NEEDED MEMORY ON CPU (exactly in RAM)
*/
__int32** Matrix_CPU_A;
Matrix_CPU_A = (__int32**)malloc(SIZE * sizeof(__int32*));
__int32** Matrix_CPU_B;
Matrix_CPU_B = (__int32**)malloc(SIZE * sizeof(__int32*));
__int32** Matrix_CPU_C;
Matrix_CPU_C = (__int32**)malloc(SIZE * sizeof(__int32*));
for (size_t i = 0; i < SIZE; ++i)
{
*(Matrix_CPU_A + i) = (__int32*)malloc(SIZE * sizeof(__int32));
*(Matrix_CPU_B + i) = (__int32*)malloc(SIZE * sizeof(__int32));
*(Matrix_CPU_C + i) = (__int32*)malloc(SIZE * sizeof(__int32));
//Matrix_CPU[i] = (__int32*)malloc(SIZE * sizeof(__int32));
}
////////////////////////////////////////////////////
//FILL MATRIX WITH RANDOM VALUES
Fill_Matrix(Matrix_CPU_A, "file.in");
Fill_Matrix(Matrix_CPU_B, "file1.in");
Fill_Matrix(Matrix_CPU_C, 0);
//SHOW FILLED UP ARRAY
Show_Matrix(Matrix_CPU_A);
Show_Matrix(Matrix_CPU_B);
Show_Matrix(Matrix_CPU_C);
/*
ALLOCATING NEEDED MEMORY ON GPU
*/
__int32* Matrix_GPU_A;
hipMalloc((void**)&Matrix_GPU_A, (SIZE * SIZE) * sizeof(__int32)); //GPU interprets 2D array as a flat array !
__int32* Matrix_GPU_B;
hipMalloc((void**)&Matrix_GPU_B, (SIZE * SIZE) * sizeof(__int32));
__int32* Matrix_GPU_C;
hipMalloc((void**)&Matrix_GPU_C, (SIZE * SIZE) * sizeof(__int32));
////////////////////////////////////////////////////
//COPY CPU ARRAY TO GPU
//HANDLE_ERROR(hipMemcpy(Matrix_GPU, Matrix_CPU, (SIZE * SIZE) * sizeof(__int32), HostToDevice)); //only for statics array from HOST's!
for (size_t i = 0; i < SIZE; ++i)
{
hipMemcpy(Matrix_GPU_A + i * SIZE, *(Matrix_CPU_A + i), sizeof(__int32) * SIZE, HostToDevice);
hipMemcpy(Matrix_GPU_B + i * SIZE, *(Matrix_CPU_B + i), sizeof(__int32) * SIZE, HostToDevice);
hipMemcpy(Matrix_GPU_C + i * SIZE, *(Matrix_CPU_C + i), sizeof(__int32) * SIZE, HostToDevice);
}
////////////////////////////////////////////////////
dim3 blocks(SIZE, SIZE);
dim3 threads(SIZE);
//MULTIPLY THE MATRICES
hipLaunchKernelGGL(( Multiply_Matrices) , dim3(blocks), dim3(threads) , 0, 0, Matrix_GPU_A, Matrix_GPU_B, Matrix_GPU_C);
//COPY FROM GPU TO CPU
//HANDLE_ERROR(hipMemcpy(Matrix_CPU, Matrix_GPU, (SIZE * SIZE) * sizeof(__int32), DeviceToHost)); //only for statics array
//copying data from GPU to CPU
for (size_t i = 0; i < SIZE; ++i)
{
hipMemcpy(*(Matrix_CPU_C + i), Matrix_GPU_C + i * SIZE, sizeof(__int32) * SIZE, DeviceToHost); //for dynamic allocation, only result matrix
}
////////////////////////////////////////////////////
//SHOW RESULTS
Show_Matrix(Matrix_CPU_C);
/*
FREEING PREVIOUSLY ALOCATED MEMORY
*/
//ON CPU
for (size_t i = 0; i < SIZE; ++i)
{
free(*(Matrix_CPU_A + i));
free(*(Matrix_CPU_B + i));
free(*(Matrix_CPU_C + i));
}
free(Matrix_CPU_A);
free(Matrix_CPU_B);
free(Matrix_CPU_C);
//ON GPU
hipFree(Matrix_GPU_A);
hipFree(Matrix_GPU_B);
hipFree(Matrix_GPU_C);
////////////////////////////////////////////////////
system("pause");
return 0;
}
void Show_Matrix(const __int32* const* const Matrix)
{
for (size_t i = 0; i < SIZE; ++i)
{
_STD cout << "| ";
for (size_t j = 0; j < SIZE; ++j)
{
_STD cout << Matrix[i][j] << " | ";
}
_STD cout << NEW_LINE;
}
_STD cout << NEW_LINE;
}
void Fill_Matrix(__int32* const* const Matrix, const __int32 initial_value)
{
for (size_t i = 0; i < SIZE; ++i)
{
for (size_t j = 0; j < SIZE; ++j)
{
//Matrix[i][j] = (i*SIZE)+j+initial_value;
Matrix[i][j] = initial_value;
}
}
}
void Fill_Matrix(__int32* const* const Matrix, const::std::string& file_path)
{
std::fstream file_in;
file_in.open(file_path.c_str(), std::ios_base::in);
size_t i{};
size_t j{};
if (file_in.good() == false)
{
exit(0);
}
else
{
while (file_in.eof() == false)
{
while (i < SIZE)
{
j = 0;
while (j < SIZE)
{
file_in >> Matrix[i][j];
++j;
}
++i;
}
j = NULL;
i = NULL;
}
}
file_in.close();
}
__global__ void Show_Matrix_GPU(const __int32* const Matrix)
{
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
while (id_x < SIZE)
{
while (id_y < SIZE)
{
printf("| %d ", Matrix[id_y * SIZE + id_x]);
id_y += blockDim.y * gridDim.y;
}
id_x += blockDim.x * gridDim.x;
}
printf("\n");
}
__global__ void Multiply_Matrices(const __int32* const Matrix_GPU_A, const __int32* const Matrix_GPU_B, __int32* const Matrix_GPU_C)
{
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
__int32 temporary{};
while (id_x < SIZE && id_y < SIZE)
{
for (size_t i = 0; i < SIZE; ++i)
{
temporary += Matrix_GPU_A[id_x * SIZE + i] * Matrix_GPU_B[i * SIZE + id_y];
}
Matrix_GPU_C[id_x * SIZE + id_y] = temporary;
temporary = NULL;
id_x += blockDim.x * gridDim.x;
id_y += blockDim.y * gridDim.y;
}
} | 511bee0b1a0d6289f204fc25d43ca804f4f75bdb.cu | #include "device_launch_parameters.h"
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_functions.h"
#include <stdio.h>
#include <stdlib.h>
#include <windows.h>
#include <iostream>
#include <fstream>
////////////////////////////////////////////////////
//GPU
#define HostToDevice cudaMemcpyHostToDevice
#define DeviceToHost cudaMemcpyDeviceToHost
#define OK cudaSuccess
#define NEW_LINE '\n'
#define SIZE 5
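/*
 HANDLE_ERROR is referenced only in commented-out calls below and is not
 defined anywhere in this file. A minimal checker along these lines would do
 (a sketch, not the original definition):
#define HANDLE_ERROR(call)                                                \
	do {                                                                  \
		cudaError_t err_ = (call);                                        \
		if (err_ != cudaSuccess)                                          \
		{                                                                 \
			fprintf(stderr, "CUDA error %s at %s:%d\n",                   \
				cudaGetErrorString(err_), __FILE__, __LINE__);            \
			exit(EXIT_FAILURE);                                           \
		}                                                                 \
	} while (0)
*/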
//CPU FUNCTIONS
void Show_Matrix(const __int32* const* const Matrix);
void Fill_Matrix(__int32* const* const Matrix, const __int32 initial_value);
void Fill_Matrix(__int32* const* const Matrix, const _STD string & file_path);
//GPU FUNCTIONS
__global__ void Show_Matrix_GPU(const __int32* const Matrix);
__global__ void Multiply_Matrices(const __int32* const Matrix_GPU_A, const __int32* const Matrix_GPU_B, __int32* const Matrix_GPU_C);
int main(int argc, char* argv[])
{
/*
ALLOCATING NEEDED MEMORY ON CPU (exactly in RAM)
*/
__int32** Matrix_CPU_A;
Matrix_CPU_A = (__int32**)malloc(SIZE * sizeof(__int32*));
__int32** Matrix_CPU_B;
Matrix_CPU_B = (__int32**)malloc(SIZE * sizeof(__int32*));
__int32** Matrix_CPU_C;
Matrix_CPU_C = (__int32**)malloc(SIZE * sizeof(__int32*));
for (size_t i = 0; i < SIZE; ++i)
{
*(Matrix_CPU_A + i) = (__int32*)malloc(SIZE * sizeof(__int32));
*(Matrix_CPU_B + i) = (__int32*)malloc(SIZE * sizeof(__int32));
*(Matrix_CPU_C + i) = (__int32*)malloc(SIZE * sizeof(__int32));
//Matrix_CPU[i] = (__int32*)malloc(SIZE * sizeof(__int32));
}
////////////////////////////////////////////////////
//FILL MATRIX WITH RANDOM VALUES
Fill_Matrix(Matrix_CPU_A, "file.in");
Fill_Matrix(Matrix_CPU_B, "file1.in");
Fill_Matrix(Matrix_CPU_C, 0);
//SHOW FILLED UP ARRAY
Show_Matrix(Matrix_CPU_A);
Show_Matrix(Matrix_CPU_B);
Show_Matrix(Matrix_CPU_C);
/*
ALLOCATING NEEDED MEMORY ON GPU
*/
__int32* Matrix_GPU_A;
cudaMalloc((void**)&Matrix_GPU_A, (SIZE * SIZE) * sizeof(__int32)); //GPU interprets 2D array as a flat array !
__int32* Matrix_GPU_B;
cudaMalloc((void**)&Matrix_GPU_B, (SIZE * SIZE) * sizeof(__int32));
__int32* Matrix_GPU_C;
cudaMalloc((void**)&Matrix_GPU_C, (SIZE * SIZE) * sizeof(__int32));
////////////////////////////////////////////////////
//COPY CPU ARRAY TO GPU
	//HANDLE_ERROR(cudaMemcpy(Matrix_GPU, Matrix_CPU, (SIZE * SIZE) * sizeof(__int32), HostToDevice)); //only works for static host arrays!
for (size_t i = 0; i < SIZE; ++i)
{
cudaMemcpy(Matrix_GPU_A + i * SIZE, *(Matrix_CPU_A + i), sizeof(__int32) * SIZE, HostToDevice);
cudaMemcpy(Matrix_GPU_B + i * SIZE, *(Matrix_CPU_B + i), sizeof(__int32) * SIZE, HostToDevice);
cudaMemcpy(Matrix_GPU_C + i * SIZE, *(Matrix_CPU_C + i), sizeof(__int32) * SIZE, HostToDevice);
}
////////////////////////////////////////////////////
dim3 blocks(SIZE, SIZE);
dim3 threads(SIZE);
//MULTIPLY THE MATRICES
Multiply_Matrices <<<blocks, threads >>> (Matrix_GPU_A, Matrix_GPU_B, Matrix_GPU_C);
//COPY FROM GPU TO CPU
	//HANDLE_ERROR(cudaMemcpy(Matrix_CPU, Matrix_GPU, (SIZE * SIZE) * sizeof(__int32), DeviceToHost)); //only works for static host arrays
//copying data from GPU to CPU
for (size_t i = 0; i < SIZE; ++i)
{
cudaMemcpy(*(Matrix_CPU_C + i), Matrix_GPU_C + i * SIZE, sizeof(__int32) * SIZE, DeviceToHost); //for dynamic allocation, only result matrix
}
////////////////////////////////////////////////////
//SHOW RESULTS
Show_Matrix(Matrix_CPU_C);
/*
	FREEING PREVIOUSLY ALLOCATED MEMORY
*/
//ON CPU
for (size_t i = 0; i < SIZE; ++i)
{
free(*(Matrix_CPU_A + i));
free(*(Matrix_CPU_B + i));
free(*(Matrix_CPU_C + i));
}
free(Matrix_CPU_A);
free(Matrix_CPU_B);
free(Matrix_CPU_C);
//ON GPU
cudaFree(Matrix_GPU_A);
cudaFree(Matrix_GPU_B);
cudaFree(Matrix_GPU_C);
////////////////////////////////////////////////////
system("pause");
return 0;
}
void Show_Matrix(const __int32* const* const Matrix)
{
for (size_t i = 0; i < SIZE; ++i)
{
_STD cout << "| ";
for (size_t j = 0; j < SIZE; ++j)
{
_STD cout << Matrix[i][j] << " | ";
}
_STD cout << NEW_LINE;
}
_STD cout << NEW_LINE;
}
void Fill_Matrix(__int32* const* const Matrix, const __int32 initial_value)
{
for (size_t i = 0; i < SIZE; ++i)
{
for (size_t j = 0; j < SIZE; ++j)
{
//Matrix[i][j] = (i*SIZE)+j+initial_value;
Matrix[i][j] = initial_value;
}
}
}
void Fill_Matrix(__int32* const* const Matrix, const::std::string& file_path)
{
std::fstream file_in;
file_in.open(file_path.c_str(), std::ios_base::in);
size_t i{};
size_t j{};
if (file_in.good() == false)
{
exit(0);
}
else
{
		// Read exactly SIZE*SIZE values. Wrapping this in a while(!eof()) loop would
		// run the extraction loops a second time when the file ends with whitespace
		// and overwrite the matrix with zeros (failed extractions write 0 since C++11).
		while (i < SIZE)
		{
			j = 0;
			while (j < SIZE)
			{
				file_in >> Matrix[i][j];
				++j;
			}
			++i;
		}
}
file_in.close();
}
__global__ void Show_Matrix_GPU(const __int32* const Matrix)
{
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
while (id_x < SIZE)
{
while (id_y < SIZE)
{
printf("| %d ", Matrix[id_y * SIZE + id_x]);
id_y += blockDim.y * gridDim.y;
}
id_x += blockDim.x * gridDim.x;
}
printf("\n");
}
__global__ void Multiply_Matrices(const __int32* const Matrix_GPU_A, const __int32* const Matrix_GPU_B, __int32* const Matrix_GPU_C)
{
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
__int32 temporary{};
while (id_x < SIZE && id_y < SIZE)
{
for (size_t i = 0; i < SIZE; ++i)
{
temporary += Matrix_GPU_A[id_x * SIZE + i] * Matrix_GPU_B[i * SIZE + id_y];
}
Matrix_GPU_C[id_x * SIZE + id_y] = temporary;
temporary = NULL;
id_x += blockDim.x * gridDim.x;
id_y += blockDim.y * gridDim.y;
}
} |
1d2662641fe94ebb08cc51c95719f07d0924e6e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void d_MM_OPT( float *a, float *b, float *c, int wA, int wB, int hA)
{
#define blockTile 16
/* Blocksize is 16x16 */
/* Allocate shared memory */
__shared__ float aBlock[blockTile][blockTile];
__shared__ float bBlock[blockTile][blockTile];
/* Calculate global index X, Y */
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int gx = blockDim.x * bx + tx; // column
int gy = blockDim.y * by + ty; // row
/* Compute offset idx for A & B */
// First A index (row shift) Block.row * Block.width * A.width
int a0 = wA * 16 * by;
// aBegin -> last element in row -> + width - 1
int aZ = a0 + wA - 1;
// Column block iteration = blockDim.x
int aD = 16;
// b_0 -> Column Shift
int b0 = 16 * bx;
// Row block iteration = blockDim.y * width B
int bD = 16 * wB;
float sum = 0.f;
for(int aI = a0, bI = b0; aI <= aZ; aI += aD, bI += bD)
{
/* Assign shared memory and sync */
/* Warning, wA*gidy may be out of bounds */
aBlock[ty][tx] = a[aI + ty*wA + tx];
bBlock[ty][tx] = b[bI + ty*wB + tx];
/* Make sure all of the threads have updated the memory cache */
__syncthreads();
/* Sum over NK */
for(int k=0; k < 16; k++)
{
/* C = (A x B) */
sum += aBlock[ty][k] * bBlock[k][tx];
}
}
c[gy*wB + gx] = sum;
//c[i * NJ + j] = ALPHA*sum + BETA*c[i * NJ + j];
} | 1d2662641fe94ebb08cc51c95719f07d0924e6e6.cu | #include "includes.h"
__global__ void d_MM_OPT( float *a, float *b, float *c, int wA, int wB, int hA)
{
#define blockTile 16
/* Blocksize is 16x16 */
/* Allocate shared memory */
__shared__ float aBlock[blockTile][blockTile];
__shared__ float bBlock[blockTile][blockTile];
/* Calculate global index X, Y */
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int gx = blockDim.x * bx + tx; // column
int gy = blockDim.y * by + ty; // row
/* Compute offset idx for A & B */
// First A index (row shift) Block.row * Block.width * A.width
int a0 = wA * 16 * by;
// aBegin -> last element in row -> + width - 1
int aZ = a0 + wA - 1;
// Column block iteration = blockDim.x
int aD = 16;
// b_0 -> Column Shift
int b0 = 16 * bx;
// Row block iteration = blockDim.y * width B
int bD = 16 * wB;
float sum = 0.f;
for(int aI = a0, bI = b0; aI <= aZ; aI += aD, bI += bD)
{
/* Assign shared memory and sync */
/* Warning, wA*gidy may be out of bounds */
aBlock[ty][tx] = a[aI + ty*wA + tx];
bBlock[ty][tx] = b[bI + ty*wB + tx];
/* Make sure all of the threads have updated the memory cache */
__syncthreads();
/* Sum over NK */
for(int k=0; k < 16; k++)
{
/* C = (A x B) */
sum += aBlock[ty][k] * bBlock[k][tx];
}
}
c[gy*wB + gx] = sum;
//c[i * NJ + j] = ALPHA*sum + BETA*c[i * NJ + j];
} |
ef4ab344e909f92d310fdc40bec2ff519f832fe8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/gaussian_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void GaussianLossForward(const int nthreads,
const Dtype* x, const Dtype* mu, const Dtype* sigma, const int D,
const int N, const int bottom_size, Dtype* loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(index>=nthreads) return;
int item = index/D;
int idim = index%D;
Dtype logprob=0;
Dtype sig=fmaxf(0.01,fabs(sigma[item*D+idim]));
if(bottom_size>2){
logprob-=-log(sqrt(2*3.14159265*sig*sig))
-(pow(x[item*D+idim]-mu[item*D+idim],2)/(2*pow(sig,2)) );
} else {
logprob-=0.5*(1+log(sig*sig)
-(mu[item*D+idim]*mu[item*D+idim])
-(sig*sig));
}
loss_data[index]=logprob/N;
}
}
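/*
 In symbols, GaussianLossForward above accumulates per element either the
 Gaussian negative log-likelihood (when an observation blob x is supplied,
 i.e. bottom_size > 2)
     -log N(x; mu, sigma) = log(sqrt(2*pi)*sigma) + (x - mu)^2 / (2*sigma^2)
 or, otherwise, the KL divergence to a standard normal
     KL(N(mu, sigma) || N(0, 1)) = -0.5 * (1 + log(sigma^2) - mu^2 - sigma^2),
 each divided by the batch size N, with sigma clamped to at least 0.01.
*/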
template <typename Dtype>
__global__ void GaussianLossBackward(const int nthreads, const Dtype* x,
const Dtype* mu, const Dtype* sigma, const int D, const int N,
const int bottom_size, Dtype* dmu, Dtype* dsigma) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(index>=nthreads) return;
int item = index/D;
int idim = index%D;
Dtype sig=fmaxf(0.01,fabs(sigma[item*D+idim]));
if(bottom_size>2){
dmu[item*D+idim]=(-1*(x[item*D+idim]-mu[item*D+idim])/(sig*sig))/N;
dsigma[item*D+idim]=
(1/sig - pow(x[item*D+idim]-mu[item*D+idim],2)/pow(sig,3))/N;
} else {
dmu[item*D+idim]=mu[item*D+idim]/N;
dsigma[item*D+idim]=(-1*(1/sig - sig))/N;
}
}
}
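/*
 GaussianLossBackward above is the elementwise derivative of the same two
 expressions (again divided by N): for the likelihood term
     d/dmu = -(x - mu) / sigma^2,   d/dsigma = 1/sigma - (x - mu)^2 / sigma^3,
 and for the KL term
     d/dmu = mu,                    d/dsigma = -(1/sigma - sigma).
*/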
template <typename Dtype>
__global__ void GaussClipGrad(const int nthreads, const int D,
const Dtype* loss_wt, const int mu_idx, const int sigma_idx,
const Dtype maxval, const Dtype* in_mu, const Dtype* in_sigma,
const Dtype* breakdown, Dtype* out_mu, Dtype* out_sigma) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(index>=nthreads) return;
int item=index/D;
Dtype gradmax=fmaxf(fabs(in_mu[mu_idx]),fabs(in_sigma[sigma_idx]));
Dtype scalefac=1.;
if(maxval>0 && gradmax>maxval) scalefac=maxval/gradmax;
if(breakdown){
scalefac*=breakdown[item];
} else {
scalefac*=loss_wt[0];
}
out_mu[index]=scalefac*in_mu[index];
out_sigma[index]=scalefac*in_sigma[index];
}
}
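/*
 GaussClipGrad rescales both gradients by a common factor: when maxval
 (cliplimit_) is positive and the largest |gradient| at the given indices
 exceeds it, the factor is maxval / gradmax, otherwise 1; that factor is then
 multiplied by the per-item loss weight breakdown[item] when a breakdown blob
 is present, or by the scalar loss weight loss_wt[0] otherwise.
*/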
template <typename Dtype>
void GaussianLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* mu=bottom[0]->gpu_data();
const Dtype* sigma=bottom[1]->gpu_data();
const Dtype* x=NULL;
if(bottom.size()>2){
x=bottom[2]->gpu_data();
}
Dtype* loss=top[0]->mutable_cpu_data();
Dtype* breakdown=NULL;
if(top.size()>=2){
breakdown=top[1]->mutable_cpu_data();
}
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
int N=bottom[0]->shape(0);
int D=bottom[0]->count(1);
loss[0]=0;
hipLaunchKernelGGL(( GaussianLossForward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(), x, mu, sigma, D, N, bottom.size(), loss_data);
int nthreads=bottom[0]->count();
caffe_gpu_asum<Dtype>(nthreads, loss_data, loss);
if(breakdown){
for(int i=0;i<N;i++){
caffe_gpu_asum(D,loss_data+i*D,breakdown+i);
}
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void GaussianLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* mu=bottom[0]->gpu_data();
const Dtype* sigma=bottom[1]->gpu_data();
const Dtype* x=NULL;
if(bottom.size()>2){
x=bottom[2]->gpu_data();
}
const Dtype* loss_wt=top[0]->gpu_diff();
const Dtype* breakdown=NULL;
if(top.size()>=2) {
breakdown=top[1]->gpu_diff();
}
Dtype* dmu=bottom[0]->mutable_gpu_diff();
Dtype* dsigma=bottom[1]->mutable_gpu_diff();
int N=bottom[0]->shape(0);
int D=bottom[0]->shape(1);
if(breakdown){ //we will apply loss weights from breakdown, not global loss, during clip grad later
hipLaunchKernelGGL(( GaussianLossBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(), x, mu, sigma, D, N, bottom.size(),
temp_.mutable_gpu_data(), temp_.mutable_gpu_diff()); //last args are dmu, dsigma
} else {
hipLaunchKernelGGL(( GaussianLossBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(), x, mu, sigma, D, N, bottom.size(),
temp_.mutable_gpu_data(), temp_.mutable_gpu_diff()); //last args are dmu, dsigma
}
int maxgrad_mu_idx,maxgrad_sigma_idx;
caffe_gpu_absmax<Dtype>(temp_.count(),temp_.gpu_data(),&maxgrad_mu_idx);
caffe_gpu_absmax<Dtype>(temp_.count(),temp_.gpu_diff(),&maxgrad_sigma_idx);
maxgrad_mu_idx-=1; //correct for fortran-style indexing
maxgrad_sigma_idx-=1;
hipLaunchKernelGGL(( GaussClipGrad<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(), D, loss_wt, maxgrad_mu_idx, maxgrad_sigma_idx,
cliplimit_,temp_.gpu_data(), temp_.gpu_diff(), breakdown,
bottom[0]->mutable_gpu_diff(), bottom[1]->mutable_gpu_diff()); //dmu, dsigma
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(GaussianLossLayer);
} // namespace caffe
| ef4ab344e909f92d310fdc40bec2ff519f832fe8.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/gaussian_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void GaussianLossForward(const int nthreads,
const Dtype* x, const Dtype* mu, const Dtype* sigma, const int D,
const int N, const int bottom_size, Dtype* loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(index>=nthreads) return;
int item = index/D;
int idim = index%D;
Dtype logprob=0;
Dtype sig=fmaxf(0.01,fabs(sigma[item*D+idim]));
if(bottom_size>2){
logprob-=-log(sqrt(2*3.14159265*sig*sig))
-(pow(x[item*D+idim]-mu[item*D+idim],2)/(2*pow(sig,2)) );
} else {
logprob-=0.5*(1+log(sig*sig)
-(mu[item*D+idim]*mu[item*D+idim])
-(sig*sig));
}
loss_data[index]=logprob/N;
}
}
template <typename Dtype>
__global__ void GaussianLossBackward(const int nthreads, const Dtype* x,
const Dtype* mu, const Dtype* sigma, const int D, const int N,
const int bottom_size, Dtype* dmu, Dtype* dsigma) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(index>=nthreads) return;
int item = index/D;
int idim = index%D;
Dtype sig=fmaxf(0.01,fabs(sigma[item*D+idim]));
if(bottom_size>2){
dmu[item*D+idim]=(-1*(x[item*D+idim]-mu[item*D+idim])/(sig*sig))/N;
dsigma[item*D+idim]=
(1/sig - pow(x[item*D+idim]-mu[item*D+idim],2)/pow(sig,3))/N;
} else {
dmu[item*D+idim]=mu[item*D+idim]/N;
dsigma[item*D+idim]=(-1*(1/sig - sig))/N;
}
}
}
template <typename Dtype>
__global__ void GaussClipGrad(const int nthreads, const int D,
const Dtype* loss_wt, const int mu_idx, const int sigma_idx,
const Dtype maxval, const Dtype* in_mu, const Dtype* in_sigma,
const Dtype* breakdown, Dtype* out_mu, Dtype* out_sigma) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(index>=nthreads) return;
int item=index/D;
Dtype gradmax=fmaxf(fabs(in_mu[mu_idx]),fabs(in_sigma[sigma_idx]));
Dtype scalefac=1.;
if(maxval>0 && gradmax>maxval) scalefac=maxval/gradmax;
if(breakdown){
scalefac*=breakdown[item];
} else {
scalefac*=loss_wt[0];
}
out_mu[index]=scalefac*in_mu[index];
out_sigma[index]=scalefac*in_sigma[index];
}
}
template <typename Dtype>
void GaussianLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* mu=bottom[0]->gpu_data();
const Dtype* sigma=bottom[1]->gpu_data();
const Dtype* x=NULL;
if(bottom.size()>2){
x=bottom[2]->gpu_data();
}
Dtype* loss=top[0]->mutable_cpu_data();
Dtype* breakdown=NULL;
if(top.size()>=2){
breakdown=top[1]->mutable_cpu_data();
}
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
int N=bottom[0]->shape(0);
int D=bottom[0]->count(1);
loss[0]=0;
GaussianLossForward<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->count(), x, mu, sigma, D, N, bottom.size(), loss_data);
int nthreads=bottom[0]->count();
caffe_gpu_asum<Dtype>(nthreads, loss_data, loss);
if(breakdown){
for(int i=0;i<N;i++){
caffe_gpu_asum(D,loss_data+i*D,breakdown+i);
}
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void GaussianLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* mu=bottom[0]->gpu_data();
const Dtype* sigma=bottom[1]->gpu_data();
const Dtype* x=NULL;
if(bottom.size()>2){
x=bottom[2]->gpu_data();
}
const Dtype* loss_wt=top[0]->gpu_diff();
const Dtype* breakdown=NULL;
if(top.size()>=2) {
breakdown=top[1]->gpu_diff();
}
Dtype* dmu=bottom[0]->mutable_gpu_diff();
Dtype* dsigma=bottom[1]->mutable_gpu_diff();
int N=bottom[0]->shape(0);
int D=bottom[0]->shape(1);
if(breakdown){ //we will apply loss weights from breakdown, not global loss, during clip grad later
GaussianLossBackward<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->count(), x, mu, sigma, D, N, bottom.size(),
temp_.mutable_gpu_data(), temp_.mutable_gpu_diff()); //last args are dmu, dsigma
} else {
GaussianLossBackward<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->count(), x, mu, sigma, D, N, bottom.size(),
temp_.mutable_gpu_data(), temp_.mutable_gpu_diff()); //last args are dmu, dsigma
}
int maxgrad_mu_idx,maxgrad_sigma_idx;
caffe_gpu_absmax<Dtype>(temp_.count(),temp_.gpu_data(),&maxgrad_mu_idx);
caffe_gpu_absmax<Dtype>(temp_.count(),temp_.gpu_diff(),&maxgrad_sigma_idx);
maxgrad_mu_idx-=1; //correct for fortran-style indexing
maxgrad_sigma_idx-=1;
GaussClipGrad<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->count(), D, loss_wt, maxgrad_mu_idx, maxgrad_sigma_idx,
cliplimit_,temp_.gpu_data(), temp_.gpu_diff(), breakdown,
bottom[0]->mutable_gpu_diff(), bottom[1]->mutable_gpu_diff()); //dmu, dsigma
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(GaussianLossLayer);
} // namespace caffe
|
d4062e88e00f117383b541fa065c4459aa164c3c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ElementwiseNorm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE*sizeof(float)); // XSIZE*YSIZE floats
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
ElementwiseNorm), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
ElementwiseNorm), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,size);
}
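// The untimed launches above warm up the device before timing. Note that the
// timed loop below is not followed by a hipDeviceSynchronize() before the end
// timestamp, so the printed figure is the time to submit 1000 launches plus
// whatever kernel execution happens to overlap them, not the full kernel runtime.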
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
ElementwiseNorm), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d4062e88e00f117383b541fa065c4459aa164c3c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ElementwiseNorm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE*sizeof(float)); // XSIZE*YSIZE floats
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ElementwiseNorm<<<gridBlock,threadBlock>>>(A,B,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ElementwiseNorm<<<gridBlock,threadBlock>>>(A,B,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ElementwiseNorm<<<gridBlock,threadBlock>>>(A,B,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b2165e04dd2f35ace43386069979b8676e4a8045.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorTopK.cu"
#else
THC_API void THCTensor_(topk)(THCState* state,
THCTensor *topK,
THCudaLongTensor *indices,
THCTensor *input_,
int64_t k, int dim, int dir, int sorted) {
THAssert(topK != NULL && indices != NULL && input_ != NULL);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int64_t dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_);
THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range");
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim);
THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension");
THCTensor *input = THCTensor_(newContiguous)(state, input_);
// Build the output size, which is the dim being selected set to
// size k
THLongStorage* topKSize = THCTensor_(newSizeOf)(state, input);
THLongStorage_set(topKSize, dim, k);
THCTensor_(resize)(state, topK, topKSize, NULL);
THCudaLongTensor_resize(state, indices, topKSize, NULL);
THLongStorage_free(topKSize);
#define RUN_K(INDEX_T, DIM, DIR) \
hipLaunchKernelGGL(( gatherTopK<real, INDEX_T, DIM, DIR>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
inputInfo, \
sliceSize, \
k, \
inputSlices, \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
inputInfo.strides[collapseInputDim], \
topKInfo, \
topKSlices, \
topKInfo.strides[collapseTopKDim], \
indicesInfo, \
indicesInfo.strides[collapseIndicesDim])
#define RUN_DIR(INDEX_T, DIM) \
if (dir) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
TensorInfo<real, INDEX_T> inputInfo = \
getTensorInfo<real, THCTensor, INDEX_T>(state, input); \
TensorInfo<real, INDEX_T> topKInfo = \
getTensorInfo<real, THCTensor, INDEX_T>(state, topK); \
TensorInfo<int64_t, INDEX_T> indicesInfo = \
getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \
\
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
\
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
\
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
if (!THC_getGridFromTiles(inputSlices, grid)) { \
THError("Slice to sort is too large"); \
} \
\
dim3 block(::min(THCRoundUp(sliceSize, (int64_t) 32), (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T);
if (THCTensor_nElement(state, input) > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (THCTensor_canUse32BitIndexMath(state, input) &&
THCTensor_canUse32BitIndexMath(state, topK) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted) {
// FIXME: the k/v inplace sort along slice only works for size <=
// 2048 at the moment
if (sliceSize <= 2048) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
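      // In user-level terms the same recipe is, roughly (illustration only):
      //   values, indices = topk(input, k, dim, largest=dir, sorted=False)
      //   sorted_values, order = values.sort(dim, descending=dir)
      //   sorted_indices = indices.gather(dim, order)
      // which is exactly the sort-then-gather sequence performed below.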
THCTensor* sortedTopK = THCTensor_(new)(state);
THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir);
THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices);
THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices);
THCTensor_(freeCopyTo)(state, sortedTopK, topK);
THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices);
THCudaLongTensor_free(state, sortedIndices);
}
}
THCudaLongTensor_free(state, input);
THCudaCheck(hipGetLastError());
}
#endif // THC_GENERIC_FILE
| b2165e04dd2f35ace43386069979b8676e4a8045.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorTopK.cu"
#else
THC_API void THCTensor_(topk)(THCState* state,
THCTensor *topK,
THCudaLongTensor *indices,
THCTensor *input_,
int64_t k, int dim, int dir, int sorted) {
THAssert(topK != NULL && indices != NULL && input_ != NULL);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int64_t dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_);
THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range");
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim);
THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension");
THCTensor *input = THCTensor_(newContiguous)(state, input_);
// Build the output size, which is the dim being selected set to
// size k
THLongStorage* topKSize = THCTensor_(newSizeOf)(state, input);
THLongStorage_set(topKSize, dim, k);
THCTensor_(resize)(state, topK, topKSize, NULL);
THCudaLongTensor_resize(state, indices, topKSize, NULL);
THLongStorage_free(topKSize);
#define RUN_K(INDEX_T, DIM, DIR) \
gatherTopK<real, INDEX_T, DIM, DIR> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
inputInfo, \
sliceSize, \
k, \
inputSlices, \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
inputInfo.strides[collapseInputDim], \
topKInfo, \
topKSlices, \
topKInfo.strides[collapseTopKDim], \
indicesInfo, \
indicesInfo.strides[collapseIndicesDim])
#define RUN_DIR(INDEX_T, DIM) \
if (dir) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
TensorInfo<real, INDEX_T> inputInfo = \
getTensorInfo<real, THCTensor, INDEX_T>(state, input); \
TensorInfo<real, INDEX_T> topKInfo = \
getTensorInfo<real, THCTensor, INDEX_T>(state, topK); \
TensorInfo<int64_t, INDEX_T> indicesInfo = \
getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \
\
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
\
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
\
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
if (!THC_getGridFromTiles(inputSlices, grid)) { \
THError("Slice to sort is too large"); \
} \
\
dim3 block(std::min(THCRoundUp(sliceSize, (int64_t) 32), (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T);
if (THCTensor_nElement(state, input) > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (THCTensor_canUse32BitIndexMath(state, input) &&
THCTensor_canUse32BitIndexMath(state, topK) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted) {
// FIXME: the k/v inplace sort along slice only works for size <=
// 2048 at the moment
if (sliceSize <= 2048) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
THCTensor* sortedTopK = THCTensor_(new)(state);
THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir);
THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices);
THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices);
THCTensor_(freeCopyTo)(state, sortedTopK, topK);
THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices);
THCudaLongTensor_free(state, sortedIndices);
}
}
THCudaLongTensor_free(state, input);
THCudaCheck(cudaGetLastError());
}
#endif // THC_GENERIC_FILE
|
70da85628228e4d19662b7cab0994cf7f7af6851.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2013 Yangqing Jia
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void AveForwardLayer(const int nthread, const int n_proposal, const int label_num, const int patch_dim, const Dtype* bottom_data, const Dtype* score_mat, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthread) {
int d = index % patch_dim;
int la = (index / patch_dim) % label_num;
int p = (index / patch_dim / label_num) % n_proposal;
top_data[la*patch_dim + d] += score_mat[p*label_num + la] * bottom_data[p*patch_dim + d]/n_proposal;
}
}
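// In symbols, AveForwardLayer accumulates, for each label la and feature d,
//   top[la][d] = (1 / n_proposal) * sum_p score_mat[p][la] * bottom[p][d].
// Note the += is not atomic, so threads that share (la, d) but differ in p
// race on top_data; this may be why the GPU path is left commented out below
// and Forward_gpu falls back to the CPU implementation.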
template <typename Dtype>
__global__ void AveBackwardLayer(const int nthread, const int n_proposal, const int label_num, const int patch_dim, const Dtype* top_diff, const Dtype* score_mat, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthread) {
int p = index % n_proposal;
int la = (index / n_proposal) % label_num;
int d = (index / n_proposal / label_num) % patch_dim;
bottom_diff[p*patch_dim + d] += score_mat[p*label_num + la] * top_diff[la*patch_dim + d]/n_proposal;
}
}
// Forward_gpu for HashFusionLayer: currently delegates to Forward_cpu (the GPU kernel path below is commented out)
template <typename Dtype>
void HashFusionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top){
// const Dtype* bottom_data = bottom[0]->gpu_data();
// Dtype* top_data = (top)[0]->mutable_cpu_data();
// //Different patches are fusing into one image via max_pooling
// const Dtype* score_mat = bottom[1]->gpu_data();
// const Dtype* conv5_scales = bottom[2]->cpu_data();
// const int n_scales = bottom[2]->channels();
// caffe_set(top[0]->count(), Dtype(0), top_data);
// top_data = (top)[0]->mutable_gpu_data();
// for (int n = 0; n < img_num_; ++n){
// int n_proposal = conv5_scales[n*n_scales];
// int nthread = n_proposal * label_num_ * patch_dim_;
// AveForwardLayer<<<CAFFE_GET_BLOCKS(nthread), CAFFE_CUDA_NUM_THREADS>>>(
// nthread, n_proposal, label_num_, patch_dim_, bottom_data,score_mat, top_data);
// bottom_data += bottom[0]->offset(patch_num_each_img_);
// score_mat += bottom[1]->offset(patch_num_each_img_);
// top_data += (top)[0]->offset(1);
// }
Forward_cpu(bottom, top);
}
// Backward_gpu for HashFusionLayer: currently delegates to Backward_cpu (the GPU kernel path below is commented out)
template <typename Dtype>
void HashFusionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,const vector<Blob<Dtype>*>& bottom){
Backward_cpu(top, propagate_down, bottom);
// if (!propagate_down[0]){
// //return Dtype(0.);
// }
// const Dtype* top_diff = top[0]->gpu_diff();
// Dtype* bottom_diff = (bottom)[0]->mutable_cpu_diff();
// const Dtype* score_mat = bottom[1]->gpu_data();
// memset(bottom_diff, 0, (bottom)[0]->count() * sizeof(Dtype));
// bottom_diff = (bottom)[0]->mutable_gpu_diff();
// const Dtype* conv5_scales = bottom[2]->cpu_data();
// const int n_scales = bottom[2]->channels();
// for (int n = 0; n < img_num_; ++n){
// int n_proposal = conv5_scales[n*n_scales];
// int nthread = n_proposal * label_num_ * patch_dim_;
// AveBackwardLayer<<<CAFFE_GET_BLOCKS(nthread), CAFFE_CUDA_NUM_THREADS>>>(
// nthread, n_proposal, label_num_, patch_dim_,top_diff, score_mat, bottom_diff);
// score_mat += (bottom)[1]->offset(patch_num_each_img_);
// bottom_diff += (bottom)[0]->offset(patch_num_each_img_);
// top_diff += top[0]->offset(1);
// }
}
INSTANTIATE_LAYER_GPU_FUNCS(HashFusionLayer);
} // namespace caffe
| 70da85628228e4d19662b7cab0994cf7f7af6851.cu | // Copyright 2013 Yangqing Jia
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void AveForwardLayer(const int nthread, const int n_proposal, const int label_num, const int patch_dim, const Dtype* bottom_data, const Dtype* score_mat, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthread) {
int d = index % patch_dim;
int la = (index / patch_dim) % label_num;
int p = (index / patch_dim / label_num) % n_proposal;
top_data[la*patch_dim + d] += score_mat[p*label_num + la] * bottom_data[p*patch_dim + d]/n_proposal;
}
}
template <typename Dtype>
__global__ void AveBackwardLayer(const int nthread, const int n_proposal, const int label_num, const int patch_dim, const Dtype* top_diff, const Dtype* score_mat, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthread) {
int p = index % n_proposal;
int la = (index / n_proposal) % label_num;
int d = (index / n_proposal / label_num) % patch_dim;
bottom_diff[p*patch_dim + d] += score_mat[p*label_num + la] * top_diff[la*patch_dim + d]/n_proposal;
}
}
// Forward_gpu for HashFusionLayer: currently delegates to Forward_cpu (the GPU kernel path below is commented out)
template <typename Dtype>
void HashFusionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top){
// const Dtype* bottom_data = bottom[0]->gpu_data();
// Dtype* top_data = (top)[0]->mutable_cpu_data();
// //Different patches are fusing into one image via max_pooling
// const Dtype* score_mat = bottom[1]->gpu_data();
// const Dtype* conv5_scales = bottom[2]->cpu_data();
// const int n_scales = bottom[2]->channels();
// caffe_set(top[0]->count(), Dtype(0), top_data);
// top_data = (top)[0]->mutable_gpu_data();
// for (int n = 0; n < img_num_; ++n){
// int n_proposal = conv5_scales[n*n_scales];
// int nthread = n_proposal * label_num_ * patch_dim_;
// AveForwardLayer<<<CAFFE_GET_BLOCKS(nthread), CAFFE_CUDA_NUM_THREADS>>>(
// nthread, n_proposal, label_num_, patch_dim_, bottom_data,score_mat, top_data);
// bottom_data += bottom[0]->offset(patch_num_each_img_);
// score_mat += bottom[1]->offset(patch_num_each_img_);
// top_data += (top)[0]->offset(1);
// }
Forward_cpu(bottom, top);
}
// Backward_gpu for HashFusionLayer: currently delegates to Backward_cpu (the GPU kernel path below is commented out)
template <typename Dtype>
void HashFusionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,const vector<Blob<Dtype>*>& bottom){
Backward_cpu(top, propagate_down, bottom);
// if (!propagate_down[0]){
// //return Dtype(0.);
// }
// const Dtype* top_diff = top[0]->gpu_diff();
// Dtype* bottom_diff = (bottom)[0]->mutable_cpu_diff();
// const Dtype* score_mat = bottom[1]->gpu_data();
// memset(bottom_diff, 0, (bottom)[0]->count() * sizeof(Dtype));
// bottom_diff = (bottom)[0]->mutable_gpu_diff();
// const Dtype* conv5_scales = bottom[2]->cpu_data();
// const int n_scales = bottom[2]->channels();
// for (int n = 0; n < img_num_; ++n){
// int n_proposal = conv5_scales[n*n_scales];
// int nthread = n_proposal * label_num_ * patch_dim_;
// AveBackwardLayer<<<CAFFE_GET_BLOCKS(nthread), CAFFE_CUDA_NUM_THREADS>>>(
// nthread, n_proposal, label_num_, patch_dim_,top_diff, score_mat, bottom_diff);
// score_mat += (bottom)[1]->offset(patch_num_each_img_);
// bottom_diff += (bottom)[0]->offset(patch_num_each_img_);
// top_diff += top[0]->offset(1);
// }
}
INSTANTIATE_LAYER_GPU_FUNCS(HashFusionLayer);
} // namespace caffe
|
cccf2af87a0053f0e80ddd4e10d2fee8ab57a181.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it parallelises the search only over the two
leading letters (one thread per letter pair) and brute-forces the two digit
positions serially in each thread, far short of the scale of parallelism
available to CUDA programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o Passwordcracking2digit Passwordcracking2digit.cu
To Run:
./Passwordcracking2digit > resultscuda_2alp2dig.txt
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password matches the
last of the four plain text passwords stored in the program; matches against
the other three are only printed. Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char p1[] = "CV75";
char p2[] = "FR51";
char p3[] = "TB51";
char p4[] = "IS95";
char *w = attempt;
char *x = attempt;
char *y = attempt;
char *z = attempt;
char *password1 = p1;
char *password2 = p2;
char *password3 = p3;
char *password4 = p4;
while(*w == *password1) {
if(*w == '\0')
{
printf("Found password: %s\n",p1);
break;
}
w++;
password1++;
}
while(*x == *password2) {
if(*x == '\0')
{
printf("Found password: %s\n",p2);
break;
}
x++;
password2++;
}
while(*y == *password3) {
if(*y == '\0')
{
printf("Found password: %s\n",p3);
break;
}
y++;
password3++;
}
while(*z == *password4) {
if(*z == '\0')
{
printf("Found password: %s\n",p4);
return 1;
}
z++;
password4++;
}
return 0;
}
/****************************************************************************
The kernel is launched with 26 blocks of 26 threads: the block index selects
the first letter and the thread index the second letter of each candidate,
and the nested loops below enumerate the two digit positions, so together the
threads generate all possible passwords and test them against the hidden
passwords.
*****************************************************************************/
__global__ void kernel() {
char a,b;
char password[5];
password[4] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
char firstValue = i;
char secondValue = j;
password[0] = firstValue;
password[1] = secondValue;
for(a='0'; a<='9'; a++){
for(b='0'; b<='9'; b++){
password[2] = a;
password[3] = b;
if(is_a_match(password)) {
//printf("Success");
}
else {
//printf("tried: %s\n", password);
}
}
}
}
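/*
 With the 26-block x 26-thread launch used in main, blockIdx.x and threadIdx.x
 each select one upper-case letter (ASCII 65..90), so the 676 threads each
 enumerate the 100 digit suffixes above, covering the full
 26 * 26 * 10 * 10 = 67,600 candidate passwords.
*/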
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL((
kernel) , dim3(26),dim3(26), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
| cccf2af87a0053f0e80ddd4e10d2fee8ab57a181.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it parallelises the search only over the two
leading letters (one thread per letter pair) and brute-forces the two digit
positions serially in each thread, far short of the scale of parallelism
available to CUDA programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o Passwordcracking2digit Passwordcracking2digit.cu
To Run:
./Passwordcracking2digit > resultscuda_2alp2dig.txt
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password matches the
last of the four plain text passwords stored in the program; matches against
the other three are only printed. Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char p1[] = "CV75";
char p2[] = "FR51";
char p3[] = "TB51";
char p4[] = "IS95";
char *w = attempt;
char *x = attempt;
char *y = attempt;
char *z = attempt;
char *password1 = p1;
char *password2 = p2;
char *password3 = p3;
char *password4 = p4;
while(*w == *password1) {
if(*w == '\0')
{
printf("Found password: %s\n",p1);
break;
}
w++;
password1++;
}
while(*x == *password2) {
if(*x == '\0')
{
printf("Found password: %s\n",p2);
break;
}
x++;
password2++;
}
while(*y == *password3) {
if(*y == '\0')
{
printf("Found password: %s\n",p3);
break;
}
y++;
password3++;
}
while(*z == *password4) {
if(*z == '\0')
{
printf("Found password: %s\n",p4);
return 1;
}
z++;
password4++;
}
return 0;
}
/****************************************************************************
The kernel is launched with 26 blocks of 26 threads: the block index selects
the first letter and the thread index the second letter of each candidate,
and the nested loops below enumerate the two digit positions, so together the
threads generate all possible passwords and test them against the hidden
passwords.
*****************************************************************************/
__global__ void kernel() {
char a,b;
char password[5];
password[4] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
char firstValue = i;
char secondValue = j;
password[0] = firstValue;
password[1] = secondValue;
for(a='0'; a<='9'; a++){
for(b='0'; b<='9'; b++){
password[2] = a;
password[3] = b;
if(is_a_match(password)) {
//printf("Success");
}
else {
//printf("tried: %s\n", password);
}
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26,26>>>();
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
d1598c63371f7b23039395854b0395c452a3202e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by selection_faiss_00_generate.py
*
* Make changes there and run in this directory:
*
* > python selection_faiss_00_generate.py
*
*/
#include <cstddef> // size_t
#include <cstdint> // uint32_t
#include <raft/neighbors/detail/selection_faiss-inl.cuh>
#define instantiate_raft_neighbors_detail_select_k(payload_t, key_t) \
template void raft::neighbors::detail::select_k(const key_t* inK, \
const payload_t* inV, \
size_t n_rows, \
size_t n_cols, \
key_t* outK, \
payload_t* outV, \
bool select_min, \
int k, \
hipStream_t stream)
instantiate_raft_neighbors_detail_select_k(uint32_t, float);
#undef instantiate_raft_neighbors_detail_select_k
| d1598c63371f7b23039395854b0395c452a3202e.cu |
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by selection_faiss_00_generate.py
*
* Make changes there and run in this directory:
*
* > python selection_faiss_00_generate.py
*
*/
#include <cstddef> // size_t
#include <cstdint> // uint32_t
#include <raft/neighbors/detail/selection_faiss-inl.cuh>
#define instantiate_raft_neighbors_detail_select_k(payload_t, key_t) \
template void raft::neighbors::detail::select_k(const key_t* inK, \
const payload_t* inV, \
size_t n_rows, \
size_t n_cols, \
key_t* outK, \
payload_t* outV, \
bool select_min, \
int k, \
cudaStream_t stream)
instantiate_raft_neighbors_detail_select_k(uint32_t, float);
#undef instantiate_raft_neighbors_detail_select_k
|
612b8c77755f05372bb0dd12687e56ad42e67191.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sub_mul_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *g_out = NULL;
hipMalloc(&g_out, XSIZE*YSIZE*sizeof(double)); // XSIZE*YSIZE doubles
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(double));
double *b1 = NULL;
hipMalloc(&b1, XSIZE*YSIZE*sizeof(double));
double *b2 = NULL;
hipMalloc(&b2, XSIZE*YSIZE*sizeof(double));
double *ct = NULL;
hipMalloc(&ct, XSIZE*YSIZE*sizeof(double));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sub_mul_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, g_out,a,b1,b2,ct,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sub_mul_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, g_out,a,b1,b2,ct,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sub_mul_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, g_out,a,b1,b2,ct,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 612b8c77755f05372bb0dd12687e56ad42e67191.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sub_mul_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *g_out = NULL;
cudaMalloc(&g_out, XSIZE*YSIZE*sizeof(double)); // XSIZE*YSIZE doubles
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE*sizeof(double));
double *b1 = NULL;
cudaMalloc(&b1, XSIZE*YSIZE*sizeof(double));
double *b2 = NULL;
cudaMalloc(&b2, XSIZE*YSIZE*sizeof(double));
double *ct = NULL;
cudaMalloc(&ct, XSIZE*YSIZE*sizeof(double));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sub_mul_kernel<<<gridBlock,threadBlock>>>(g_out,a,b1,b2,ct,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sub_mul_kernel<<<gridBlock,threadBlock>>>(g_out,a,b1,b2,ct,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sub_mul_kernel<<<gridBlock,threadBlock>>>(g_out,a,b1,b2,ct,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
db6f74c0e7e43327cb077f5d27b14bf58a7fd3b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/gather_kernel_util.h"
#include "oneflow/core/kernel/kernel.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <assert.h>
namespace oneflow {
namespace {
template<typename K, typename IDX>
__device__ IDX GetInOffset(const IDX out_offset, const K* indices, const IDX num_indices,
const IDX gather_dim_size, const IDX inner_dim_size, const IDX offset) {
const IDX outer_dim_elem_cnt = num_indices * inner_dim_size;
const IDX outer_idx = out_offset / outer_dim_elem_cnt;
const IDX indices_idx = out_offset % outer_dim_elem_cnt / inner_dim_size;
const IDX inner_idx = out_offset % inner_dim_size;
assert(indices[indices_idx] >= 0);
const IDX idx = indices[indices_idx] - offset;
if (idx >= 0 && idx < gather_dim_size) {
return outer_idx * gather_dim_size * inner_dim_size + idx * inner_dim_size + inner_idx;
} else {
return -1;
}
}
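// Worked example with illustrative values: for a flattened input of shape
// (outer, gather_dim_size, inner) = (2, 5, 3), num_indices = 4, offset = 0 and
// indices = {4, 0, 2, 1}, the output element at out_offset
//   1 * (4 * 3) + 3 * 3 + 2 = 23
// decomposes into outer_idx = 1, indices_idx = 3, inner_idx = 2, so the value
// is gathered from indices[3] = 1, i.e. in_offset = 1 * (5 * 3) + 1 * 3 + 2 = 20.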
template<typename T, typename K, typename IDX>
__global__ void GatherForwardGpu(const IDX elem_cnt, const K* indices, const IDX num_indices,
const T* in, const IDX gather_dim_size, const IDX inner_dim_size,
T* out, const IDX offset) {
CUDA_1D_KERNEL_LOOP_T(IDX, i, elem_cnt) {
const IDX in_offset =
GetInOffset<K, IDX>(i, indices, num_indices, gather_dim_size, inner_dim_size, offset);
if (in_offset < 0) {
out[i] = 0;
} else {
out[i] = in[in_offset];
}
}
}
bool IsSafeUseIndex32(const Shape& flat_in_shape, const int64_t num_indices) {
const int64_t in_elem_cnt = flat_in_shape.elem_cnt();
const int64_t out_elem_cnt = flat_in_shape.At(0) * num_indices * flat_in_shape.At(2);
return ::max(out_elem_cnt, in_elem_cnt) < GetMaxVal<int32_t>() / 2;
}
} // namespace
template<typename T, typename K>
struct GatherKernelUtilImpl<DeviceType::kGPU, T, K> final {
static void Forward(ep::Stream* stream, const K* indices, int64_t num_indices, const T* in,
const Shape& flat_in_shape, T* out, const int64_t offset) {
const int64_t out_elem_cnt = flat_in_shape.At(0) * num_indices * flat_in_shape.At(2);
if (IsSafeUseIndex32(flat_in_shape, num_indices)) {
hipLaunchKernelGGL(( GatherForwardGpu<T, K, int32_t>), dim3(BlocksNum4ThreadsNum(out_elem_cnt)), dim3(kCudaThreadsNumPerBlock),
0, stream->As<ep::CudaStream>()->cuda_stream(),
out_elem_cnt, indices, num_indices, in, flat_in_shape.At(1), flat_in_shape.At(2), out,
offset);
} else {
hipLaunchKernelGGL(( GatherForwardGpu<T, K, int64_t>), dim3(BlocksNum4ThreadsNum(out_elem_cnt)), dim3(kCudaThreadsNumPerBlock),
0, stream->As<ep::CudaStream>()->cuda_stream(),
out_elem_cnt, indices, num_indices, in, flat_in_shape.At(1), flat_in_shape.At(2), out,
offset);
}
}
};
template<typename K>
struct GatherKernelUtilImpl<DeviceType::kGPU, float16, K> final {
static void Forward(ep::Stream* stream, const K* indices, int64_t num_indices, const float16* in,
const Shape& flat_in_shape, float16* out, const int64_t offset) {
GatherKernelUtilImpl<DeviceType::kGPU, half, K>::Forward(
stream, indices, num_indices, reinterpret_cast<const half*>(in), flat_in_shape,
reinterpret_cast<half*>(out), offset);
}
};
#define INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL(in_type_pair, index_type_pair) \
template struct GatherKernelUtilImpl<DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), \
OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL, GATHER_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ);
#undef INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL
} // namespace oneflow
| db6f74c0e7e43327cb077f5d27b14bf58a7fd3b5.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/gather_kernel_util.h"
#include "oneflow/core/kernel/kernel.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <assert.h>
namespace oneflow {
namespace {
template<typename K, typename IDX>
__device__ IDX GetInOffset(const IDX out_offset, const K* indices, const IDX num_indices,
const IDX gather_dim_size, const IDX inner_dim_size, const IDX offset) {
const IDX outer_dim_elem_cnt = num_indices * inner_dim_size;
const IDX outer_idx = out_offset / outer_dim_elem_cnt;
const IDX indices_idx = out_offset % outer_dim_elem_cnt / inner_dim_size;
const IDX inner_idx = out_offset % inner_dim_size;
assert(indices[indices_idx] >= 0);
const IDX idx = indices[indices_idx] - offset;
if (idx >= 0 && idx < gather_dim_size) {
return outer_idx * gather_dim_size * inner_dim_size + idx * inner_dim_size + inner_idx;
} else {
return -1;
}
}
template<typename T, typename K, typename IDX>
__global__ void GatherForwardGpu(const IDX elem_cnt, const K* indices, const IDX num_indices,
const T* in, const IDX gather_dim_size, const IDX inner_dim_size,
T* out, const IDX offset) {
CUDA_1D_KERNEL_LOOP_T(IDX, i, elem_cnt) {
const IDX in_offset =
GetInOffset<K, IDX>(i, indices, num_indices, gather_dim_size, inner_dim_size, offset);
if (in_offset < 0) {
out[i] = 0;
} else {
out[i] = in[in_offset];
}
}
}
bool IsSafeUseIndex32(const Shape& flat_in_shape, const int64_t num_indices) {
const int64_t in_elem_cnt = flat_in_shape.elem_cnt();
const int64_t out_elem_cnt = flat_in_shape.At(0) * num_indices * flat_in_shape.At(2);
return std::max(out_elem_cnt, in_elem_cnt) < GetMaxVal<int32_t>() / 2;
}
} // namespace
template<typename T, typename K>
struct GatherKernelUtilImpl<DeviceType::kGPU, T, K> final {
static void Forward(ep::Stream* stream, const K* indices, int64_t num_indices, const T* in,
const Shape& flat_in_shape, T* out, const int64_t offset) {
const int64_t out_elem_cnt = flat_in_shape.At(0) * num_indices * flat_in_shape.At(2);
if (IsSafeUseIndex32(flat_in_shape, num_indices)) {
GatherForwardGpu<T, K, int32_t><<<BlocksNum4ThreadsNum(out_elem_cnt), kCudaThreadsNumPerBlock,
0, stream->As<ep::CudaStream>()->cuda_stream()>>>(
out_elem_cnt, indices, num_indices, in, flat_in_shape.At(1), flat_in_shape.At(2), out,
offset);
} else {
GatherForwardGpu<T, K, int64_t><<<BlocksNum4ThreadsNum(out_elem_cnt), kCudaThreadsNumPerBlock,
0, stream->As<ep::CudaStream>()->cuda_stream()>>>(
out_elem_cnt, indices, num_indices, in, flat_in_shape.At(1), flat_in_shape.At(2), out,
offset);
}
}
};
template<typename K>
struct GatherKernelUtilImpl<DeviceType::kGPU, float16, K> final {
static void Forward(ep::Stream* stream, const K* indices, int64_t num_indices, const float16* in,
const Shape& flat_in_shape, float16* out, const int64_t offset) {
GatherKernelUtilImpl<DeviceType::kGPU, half, K>::Forward(
stream, indices, num_indices, reinterpret_cast<const half*>(in), flat_in_shape,
reinterpret_cast<half*>(out), offset);
}
};
#define INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL(in_type_pair, index_type_pair) \
template struct GatherKernelUtilImpl<DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), \
OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL, GATHER_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ);
#undef INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL
} // namespace oneflow
|
02242acbc593b1b17e37245a52dfdfbb21741c0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************
* Copyright 2011-2012,
* Marwan Abdellah: <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
********************************************************************/
#ifndef _CU_SUBTRACT_1D_ARRAY_DEVICE_IMPL_CU_
#define _CU_SUBTRACT_1D_ARRAY_DEVICE_IMPL_CU_
#include "cuGlobals.h"
#include "Timers/Boost.h"
#include "Kernels/Constant_Subtract_1D_Array_Kernel.cu"
/*! Implementation for the Constant_Subtract_1D_Array_Kernel kernel.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template <typename T>
extern
void cu_Constant_Subtract_1D_Array_Impl
(dim3 cuBlock, dim3 cuGrid,
T* devArrayInput, T constVal, T* devArrayOutput, int N,
cuProfile* profile)
{
// Create CUDA timer
cutCreateTimer(&(profile->kernelTime));
// Reset CUDA timer
cutResetTimer(profile->kernelTime);
// Start CUDA timer
cutStartTimer(profile->kernelTime);
// Execute the kernel
hipLaunchKernelGGL(( Constant_Subtract_1D_Array_Kernel)
, dim3(cuGrid), dim3(cuBlock) , 0, 0, devArrayInput, constVal, devArrayOutput, N);
// Stop CUDA timer
cutStopTimer(profile->kernelTime);
// Calculate kernel execution time
profile->kernelDuration = cutGetTimerValue(profile->kernelTime);
    // Check successful execution of the kernel
profile->kernelExecErr = hipPeekAtLastError();
}
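/* Minimal host-side usage sketch (hypothetical values; devIn and devOut are
 * assumed to be device pointers of length N allocated by the caller):
 *
 *     const int N = 1 << 20;
 *     dim3 cuBlock(256);
 *     dim3 cuGrid((N + cuBlock.x - 1) / cuBlock.x);
 *     cuProfile profile;
 *     cu_Constant_Subtract_1D_Array_Impl <float>
 *         (cuBlock, cuGrid, devIn, 1.5f, devOut, N, &profile);
 */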
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
* input vector of type char.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <char>
(dim3 cuBlock, dim3 cuGrid,
char *devArrayInput, char constVal, char* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type unsigned char.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <unsigned char>
(dim3 cuBlock, dim3 cuGrid,
unsigned char* devArrayInput, unsigned char constVal, unsigned char* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type int.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <int>
(dim3 cuBlock, dim3 cuGrid,
int* devArrayInput, int constVal, int* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type unsigned int.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <unsigned int>
(dim3 cuBlock, dim3 cuGrid,
unsigned int* devArrayInput, unsigned int constVal, unsigned int* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type float.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <float>
(dim3 cuBlock, dim3 cuGrid,
float* devArrayInput, float constVal, float* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type double.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <double>
(dim3 cuBlock, dim3 cuGrid,
double* devArrayInput, double constVal, double* devArrayOutput, int N,
cuProfile* profile);
#endif // _CU_SUBTRACT_1D_ARRAY_DEVICE_IMPL_CU_
| 02242acbc593b1b17e37245a52dfdfbb21741c0c.cu | /*********************************************************************
* Copyright © 2011-2012,
* Marwan Abdellah: <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
********************************************************************/
#ifndef _CU_SUBTRACT_1D_ARRAY_DEVICE_IMPL_CU_
#define _CU_SUBTRACT_1D_ARRAY_DEVICE_IMPL_CU_
#include "cuGlobals.h"
#include "Timers/Boost.h"
#include "Kernels/Constant_Subtract_1D_Array_Kernel.cu"
/*! Implementation for the Constant_Subtract_1D_Array_Kernel kernel.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template <typename T>
extern
void cu_Constant_Subtract_1D_Array_Impl
(dim3 cuBlock, dim3 cuGrid,
T* devArrayInput, T constVal, T* devArrayOutput, int N,
cuProfile* profile)
{
// Create CUDA timer
cutCreateTimer(&(profile->kernelTime));
// Reset CUDA timer
cutResetTimer(profile->kernelTime);
// Start CUDA timer
cutStartTimer(profile->kernelTime);
// Execute the kernel
Constant_Subtract_1D_Array_Kernel
<<< cuGrid, cuBlock >>> (devArrayInput, constVal, devArrayOutput, N);
// Stop CUDA timer
cutStopTimer(profile->kernelTime);
// Calculate kernel execution time
profile->kernelDuration = cutGetTimerValue(profile->kernelTime);
// Check successfull execution of the kernel
profile->kernelExecErr = cudaPeekAtLastError();
}
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
* input vector of type char.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <char>
(dim3 cuBlock, dim3 cuGrid,
char *devArrayInput, char constVal, char* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type unsigned char.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <unsigned char>
(dim3 cuBlock, dim3 cuGrid,
unsigned char* devArrayInput, unsigned char constVal, unsigned char* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type int.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <int>
(dim3 cuBlock, dim3 cuGrid,
int* devArrayInput, int constVal, int* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type unsigned int.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <unsigned int>
(dim3 cuBlock, dim3 cuGrid,
unsigned int* devArrayInput, unsigned int constVal, unsigned int* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type float.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <float>
(dim3 cuBlock, dim3 cuGrid,
float* devArrayInput, float constVal, float* devArrayOutput, int N,
cuProfile* profile);
/*! Instantiates cu_Constant_Subtract_1D_Array_Impl() with the explicitly specified template for
 * input vector of type double.
*
* @param cuBlock
* Kernel block configuration.
*
* @param cuGrid
* Kernel grid configuration.
*
* @param devArrayInput
* Input device vector.
*
* @param constVal
* Constant value to be subtracted from the input device vector.
*
* @param devArrayOutput
* Output device vector.
*
* @param N
* Length of the input vector.
*
* @param profile
* GPU profiling structure.
*
*/
template
void cu_Constant_Subtract_1D_Array_Impl <double>
(dim3 cuBlock, dim3 cuGrid,
double* devArrayInput, double constVal, double* devArrayOutput, int N,
cuProfile* profile);
#endif // _CU_SUBTRACT_1D_ARRAY_DEVICE_IMPL_CU_
|
9f48a35038a5735a90d18c76dfe1a45e247b8cf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "GAMER.h"
extern real (*d_Rho_Array_P )[ RHO_NXT*RHO_NXT*RHO_NXT ];
extern real (*d_Pot_Array_P_In )[ POT_NXT*POT_NXT*POT_NXT ];
extern real (*d_Pot_Array_P_Out)[ GRA_NXT*GRA_NXT*GRA_NXT ];
extern real (*d_Flu_Array_G )[NCOMP][ PATCH_SIZE*PATCH_SIZE*PATCH_SIZE ];
// REMOVE in the actual implementation
// #########################################################################
extern hipStream_t *Stream;
extern int GPU_NSTREAM;
// #########################################################################
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_MemAllocate_PoissonGravity
// Description : Allocate device and host memory for the Poisson and Gravity solvers
//
// Parameter : Pot_NPatchGroup : Number of patch groups evaluated simultaneously by GPU
// Rho_Array_P : Array to store the input density for the Poisson solver
// Pot_Array_P_In : Array to store the input coarse-grid potential for the Poisson solver
// Pot_Array_P_Out : Array to store the output fine-grid potential for the Poisson solver
// Flu_Array_G : Array to store the input and output fluid variables for the gravity solver
//-------------------------------------------------------------------------------------------------------
void CUAPI_MemAllocate_PoissonGravity( const int Pot_NPatchGroup,
real (**Rho_Array_P )[RHO_NXT][RHO_NXT][RHO_NXT],
real (**Pot_Array_P_In )[POT_NXT][POT_NXT][POT_NXT],
real (**Pot_Array_P_Out)[GRA_NXT][GRA_NXT][GRA_NXT],
real (**Flu_Array_G )[NCOMP][PATCH_SIZE][PATCH_SIZE][PATCH_SIZE] )
{
const long Pot_NPatch = 8*Pot_NPatchGroup;
const long Rho_MemSize_P = sizeof(real)*Pot_NPatch*RHO_NXT *RHO_NXT *RHO_NXT;
const long Pot_MemSize_P_In = sizeof(real)*Pot_NPatch*POT_NXT *POT_NXT *POT_NXT;
const long Pot_MemSize_P_Out = sizeof(real)*Pot_NPatch*GRA_NXT *GRA_NXT *GRA_NXT;
const long Flu_MemSize_G = sizeof(real)*Pot_NPatch*PATCH_SIZE*PATCH_SIZE*PATCH_SIZE*NCOMP;
// output the total memory requirement
long TotalSize = Rho_MemSize_P + Pot_MemSize_P_In + Pot_MemSize_P_Out + Flu_MemSize_G;
// REPLACE in the actual implementation
// #########################################################################
// if ( MPI_Rank == 0 )
// #########################################################################
Aux_Message( stdout, "NOTE : total memory requirement in GPU gravity solver = %ld MB\n",
TotalSize/(1<<20) );
// allocate the device memory
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Rho_Array_P, Rho_MemSize_P ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Pot_Array_P_In, Pot_MemSize_P_In ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Pot_Array_P_Out, Pot_MemSize_P_Out ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Flu_Array_G, Flu_MemSize_G ) );
// allocate the host memory by CUDA
// REPLACE in the actual implementation
// #########################################################################
for (int t=0; t<1; t++)
// for (int t=0; t<2; t++)
// #########################################################################
{
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &Rho_Array_P [t], Rho_MemSize_P ) );
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &Pot_Array_P_In [t], Pot_MemSize_P_In ) );
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &Pot_Array_P_Out[t], Pot_MemSize_P_Out ) );
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &Flu_Array_G [t], Flu_MemSize_G ) );
}
// REMOVE in the actual implementation
// #########################################################################
// create streams
Stream = new hipStream_t [GPU_NSTREAM];
for (int s=0; s<GPU_NSTREAM; s++) hipStreamCreate( &Stream[s] );
// #########################################################################
} // FUNCTION : CUAPI_MemAllocate_PoissonGravity
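// Sketch of the matching cleanup (hypothetical; GAMER pairs this routine with a
// corresponding memory-free API call). Each allocation above would be released as:
//
//    for (int s=0; s<GPU_NSTREAM; s++)   hipStreamDestroy( Stream[s] );
//    delete [] Stream;
//    CUDA_CHECK_ERROR(  hipFree( d_Rho_Array_P )  );
//    CUDA_CHECK_ERROR(  hipHostFree( Rho_Array_P[0] )  );
//    // ... and likewise for the Pot_Array_P_In/Out and Flu_Array_G buffers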
| 9f48a35038a5735a90d18c76dfe1a45e247b8cf8.cu | #include "GAMER.h"
extern real (*d_Rho_Array_P )[ RHO_NXT*RHO_NXT*RHO_NXT ];
extern real (*d_Pot_Array_P_In )[ POT_NXT*POT_NXT*POT_NXT ];
extern real (*d_Pot_Array_P_Out)[ GRA_NXT*GRA_NXT*GRA_NXT ];
extern real (*d_Flu_Array_G )[NCOMP][ PATCH_SIZE*PATCH_SIZE*PATCH_SIZE ];
// REMOVE in the actual implementation
// #########################################################################
extern cudaStream_t *Stream;
extern int GPU_NSTREAM;
// #########################################################################
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_MemAllocate_PoissonGravity
// Description : Allocate device and host memory for the Poisson and Gravity solvers
//
// Parameter : Pot_NPatchGroup : Number of patch groups evaluated simultaneously by GPU
// Rho_Array_P : Array to store the input density for the Poisson solver
// Pot_Array_P_In : Array to store the input coarse-grid potential for the Poisson solver
// Pot_Array_P_Out : Array to store the output fine-grid potential for the Poisson solver
// Flu_Array_G : Array to store the input and output fluid variables for the gravity solver
//-------------------------------------------------------------------------------------------------------
void CUAPI_MemAllocate_PoissonGravity( const int Pot_NPatchGroup,
real (**Rho_Array_P )[RHO_NXT][RHO_NXT][RHO_NXT],
real (**Pot_Array_P_In )[POT_NXT][POT_NXT][POT_NXT],
real (**Pot_Array_P_Out)[GRA_NXT][GRA_NXT][GRA_NXT],
real (**Flu_Array_G )[NCOMP][PATCH_SIZE][PATCH_SIZE][PATCH_SIZE] )
{
const long Pot_NPatch = 8*Pot_NPatchGroup;
const long Rho_MemSize_P = sizeof(real)*Pot_NPatch*RHO_NXT *RHO_NXT *RHO_NXT;
const long Pot_MemSize_P_In = sizeof(real)*Pot_NPatch*POT_NXT *POT_NXT *POT_NXT;
const long Pot_MemSize_P_Out = sizeof(real)*Pot_NPatch*GRA_NXT *GRA_NXT *GRA_NXT;
const long Flu_MemSize_G = sizeof(real)*Pot_NPatch*PATCH_SIZE*PATCH_SIZE*PATCH_SIZE*NCOMP;
// output the total memory requirement
long TotalSize = Rho_MemSize_P + Pot_MemSize_P_In + Pot_MemSize_P_Out + Flu_MemSize_G;
// REPLACE in the actual implementation
// #########################################################################
// if ( MPI_Rank == 0 )
// #########################################################################
Aux_Message( stdout, "NOTE : total memory requirement in GPU gravity solver = %ld MB\n",
TotalSize/(1<<20) );
// allocate the device memory
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Rho_Array_P, Rho_MemSize_P ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Pot_Array_P_In, Pot_MemSize_P_In ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Pot_Array_P_Out, Pot_MemSize_P_Out ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Flu_Array_G, Flu_MemSize_G ) );
// allocate the host memory by CUDA
// REPLACE in the actual implementation
// #########################################################################
for (int t=0; t<1; t++)
// for (int t=0; t<2; t++)
// #########################################################################
{
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &Rho_Array_P [t], Rho_MemSize_P ) );
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &Pot_Array_P_In [t], Pot_MemSize_P_In ) );
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &Pot_Array_P_Out[t], Pot_MemSize_P_Out ) );
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &Flu_Array_G [t], Flu_MemSize_G ) );
}
// REMOVE in the actual implementation
// #########################################################################
// create streams
Stream = new cudaStream_t [GPU_NSTREAM];
for (int s=0; s<GPU_NSTREAM; s++) cudaStreamCreate( &Stream[s] );
// #########################################################################
} // FUNCTION : CUAPI_MemAllocate_PoissonGravity
|
58cbaaae30cd93540acd65d5f6b951087838c459.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void kernel()
{
int num = threadIdx.x + blockIdx.x * blockDim.x;
printf("Thread index: %d\n",num);
}
int main()
{
hipLaunchKernelGGL(( kernel), dim3(4), dim3(2), 0, 0, );
hipDeviceSynchronize();
return 0;
}
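// With 4 blocks of 2 threads each, the computed global indices are 0..7, so the
// program prints eight "Thread index: N" lines; their order is not deterministic
// because the blocks may be scheduled in any order.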
| 58cbaaae30cd93540acd65d5f6b951087838c459.cu | #include <stdio.h>
__global__ void kernel()
{
int num = threadIdx.x + blockIdx.x * blockDim.x;
printf("Thread index: %d\n",num);
}
int main()
{
kernel<<<4, 2>>>();
cudaDeviceSynchronize();
return 0;
}
|
fbccde16da6e1faebc2e12ca5cc0e36c8e509b1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/layers/softmax_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -max_dtype<Dtype>();
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0.F;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0.F;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Ftype, typename Btype>
void SoftmaxLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
Ftype* scale_data = scale_.template mutable_gpu_data<Ftype>();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
hipStream_t stream = Caffe::thread_stream();
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, count, outer_num_, channels, inner_num_,
scale_data, top_data);
CUDA_CHECK(hipStreamSynchronize(stream));
}
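// Illustrative check of the pipeline above for a single position with channels = 3
// and inner_num_ = 1: data = [1, 2, 3] -> max = 3 -> shifted = [-2, -1, 0]
// -> exp ~= [0.135, 0.368, 1.000] -> sum ~= 1.503 -> softmax ~= [0.090, 0.245, 0.665].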
template <typename Ftype, typename Btype>
void SoftmaxLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
const Btype* top_diff = top[0]->gpu_diff<Btype>();
const Btype* top_data = top[0]->gpu_data<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
Btype* scale_data = scale_.template mutable_gpu_data<Btype>();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
hipStream_t stream = Caffe::thread_stream();
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
CUDA_CHECK(hipStreamSynchronize(stream));
// elementwise multiplication
caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
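// The backward pass above applies the softmax Jacobian in closed form:
// bottom_diff_c = top_data_c * (top_diff_c - sum_k(top_diff_k * top_data_k)),
// where the sum runs over the channel axis independently for each
// (outer, spatial) position.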
INSTANTIATE_LAYER_GPU_FUNCS_FB(SoftmaxLayer);
} // namespace caffe
| fbccde16da6e1faebc2e12ca5cc0e36c8e509b1d.cu | #include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/layers/softmax_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -max_dtype<Dtype>();
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0.F;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0.F;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Ftype, typename Btype>
void SoftmaxLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
Ftype* scale_data = scale_.template mutable_gpu_data<Ftype>();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
cudaStream_t stream = Caffe::thread_stream();
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template <typename Ftype, typename Btype>
void SoftmaxLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
const Btype* top_diff = top[0]->gpu_diff<Btype>();
const Btype* top_data = top[0]->gpu_data<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
Btype* scale_data = scale_.template mutable_gpu_data<Btype>();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
cudaStream_t stream = Caffe::thread_stream();
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
CUDA_CHECK(cudaStreamSynchronize(stream));
// elementwise multiplication
caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(SoftmaxLayer);
} // namespace caffe
|
f1194a3c30549cae69bd0c11557bc665a1f081db.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_utils.hpp"
#include "vectorarray.hpp"
namespace soa {
// ===================== Kernel Vector Construction =====================================
template <int N>
KVectorArray1D<N>::KVectorArray1D(double *const (&D_ptrs)[N], int Nx) : Nx(Nx) {
for (int i = 0; i < N; i++)
this->D_ptrs[i] = D_ptrs[i];
}
template <int N>
KVectorArray2D<N>::KVectorArray2D(double *const (&D_ptrs)[N], int Nx, int Ny)
: Nx(Nx), Ny(Ny) {
for (int i = 0; i < N; i++)
this->D_ptrs[i] = D_ptrs[i];
}
template <int N>
KVectorArray3D<N>::KVectorArray3D(double *const (&D_ptrs)[N], int Nx, int Ny, int Nz)
: Nx(Nx), Ny(Ny), Nz(Nz) {
for (int i = 0; i < N; i++)
this->D_ptrs[i] = D_ptrs[i];
}
// ================ VectorArray1D Construction and move assignment ========================
template <int N> VectorArray1D<N>::VectorArray1D(int Nx) : Nx(Nx) {
double *H_ptr, *D_ptr;
check(hipHostMalloc(&H_ptr, sizeof(double) * Nx * N));
check(hipMalloc(&D_ptr, sizeof(double) * Nx * N));
for (int i = 0; i < N; i++) {
H_ptrs[i] = H_ptr + Nx * i;
D_ptrs[i] = D_ptr + Nx * i;
}
}
template <int N>
VectorArray1D<N>::VectorArray1D(int Nx, std::function<Vector<N>(int)> f)
: VectorArray1D(Nx) {
for (int i = 0; i < Nx; i++)
this->operator()(i) = f(i);
check(hipMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * N,
hipMemcpyHostToDevice));
}
template <int N> VectorArray1D<N>::~VectorArray1D() {
check(hipHostFree(H_ptrs[0]));
check(hipFree(D_ptrs[0]));
}
template <int N>
VectorArray1D<N>::VectorArray1D(VectorArray1D<N> &&rhs) noexcept : Nx(rhs.Nx) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
}
template <int N>
VectorArray1D<N> &VectorArray1D<N>::operator=(VectorArray1D<N> &&rhs) noexcept {
if (this != &rhs) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
Nx = rhs.Nx;
}
return *this;
}
// ================ VectorArray2D Construction and move assignment ========================
template <int N> VectorArray2D<N>::VectorArray2D(int Nx, int Ny) : Nx(Nx) ,Ny(Ny){
double *H_ptr, *D_ptr;
check(hipHostMalloc(&H_ptr, sizeof(double) * Nx * Ny * N));
check(hipMalloc(&D_ptr, sizeof(double) * Nx * Ny * N));
for (int i = 0; i < N; i++) {
H_ptrs[i] = H_ptr + Nx * Ny * i;
D_ptrs[i] = D_ptr + Nx * Ny * i;
}
}
template <int N>
VectorArray2D<N>::VectorArray2D(int Nx, int Ny,
std::function<Vector<N>(int, int)> f)
: VectorArray2D(Nx, Ny) {
for (int j = 0; j < Ny; j++)
for (int i = 0; i < Nx; i++)
this->operator()(i, j) = f(i, j);
check(hipMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * Ny * N,
hipMemcpyHostToDevice));
}
template <int N> VectorArray2D<N>::~VectorArray2D() {
check(hipHostFree(H_ptrs[0]));
check(hipFree(D_ptrs[0]));
}
template <int N>
VectorArray2D<N>::VectorArray2D(VectorArray2D<N> &&rhs) noexcept : Nx(rhs.Nx) , Ny(rhs.Ny){
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
}
template <int N>
VectorArray2D<N> &VectorArray2D<N>::operator=(VectorArray2D<N> &&rhs) noexcept {
if (this != &rhs) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
Nx = rhs.Nx;
Ny = rhs.Ny;
}
return *this;
}
// ================ VectorArray3D Construction and move assignment ========================
template <int N> VectorArray3D<N>::VectorArray3D(int Nx, int Ny, int Nz) : Nx(Nx) ,Ny(Ny), Nz(Nz){
double *H_ptr, *D_ptr;
check(hipHostMalloc(&H_ptr, sizeof(double) * Nx * Ny * Nz * N));
check(hipMalloc(&D_ptr, sizeof(double) * Nx * Ny * Nz * N));
for (int i = 0; i < N; i++) {
H_ptrs[i] = H_ptr + Nx * Ny * Nz * i;
D_ptrs[i] = D_ptr + Nx * Ny * Nz * i;
}
}
template <int N>
VectorArray3D<N>::VectorArray3D(int Nx, int Ny, int Nz,
std::function<Vector<N>(int, int, int)> f)
: VectorArray3D(Nx, Ny, Nz) {
for (int k = 0; k < Nz; k++)
for (int j = 0; j < Ny; j++)
for (int i = 0; i < Nx; i++)
this->operator()(i, j, k) = f(i, j, k);
check(hipMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * Ny * Nz * N,
hipMemcpyHostToDevice));
}
template <int N> VectorArray3D<N>::~VectorArray3D() {
check(hipHostFree(H_ptrs[0]));
check(hipFree(D_ptrs[0]));
}
template <int N>
VectorArray3D<N>::VectorArray3D(VectorArray3D<N> &&rhs) noexcept
: Nx(rhs.Nx), Ny(rhs.Ny), Nz(rhs.Nz) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
}
template <int N>
VectorArray3D<N> &VectorArray3D<N>::operator=(VectorArray3D<N> &&rhs) noexcept {
if (this != &rhs) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
Nx = rhs.Nx;
Ny = rhs.Ny;
Nz = rhs.Nz;
}
return *this;
}
template <int N> void VectorArray1D<N>::DeviceToHost() {
check(hipMemcpy(H_ptrs[0], D_ptrs[0], sizeof(double) * Nx * N,
hipMemcpyDeviceToHost));
}
template <int N> void VectorArray1D<N>::HostToDevice() {
check(hipMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * N,
hipMemcpyHostToDevice));
}
template <int N> void VectorArray2D<N>::DeviceToHost() {
check(hipMemcpy(H_ptrs[0], D_ptrs[0], sizeof(double) * Nx * Ny * N,
hipMemcpyDeviceToHost));
}
template <int N> void VectorArray2D<N>::HostToDevice() {
check(hipMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * Ny * N,
hipMemcpyHostToDevice));
}
template <int N> void VectorArray3D<N>::DeviceToHost() {
check(hipMemcpy(H_ptrs[0], D_ptrs[0], sizeof(double) * Nx * Ny * Nz * N,
hipMemcpyDeviceToHost));
}
template <int N> void VectorArray3D<N>::HostToDevice() {
check(hipMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * Ny * Nz * N,
hipMemcpyHostToDevice));
}
} // namespace soa
#include "instanciation.ipp"
| f1194a3c30549cae69bd0c11557bc665a1f081db.cu | #include "cuda_utils.hpp"
#include "vectorarray.hpp"
namespace soa {
// ===================== Kernel Vector Construction =====================================
template <int N>
KVectorArray1D<N>::KVectorArray1D(double *const (&D_ptrs)[N], int Nx) : Nx(Nx) {
for (int i = 0; i < N; i++)
this->D_ptrs[i] = D_ptrs[i];
}
template <int N>
KVectorArray2D<N>::KVectorArray2D(double *const (&D_ptrs)[N], int Nx, int Ny)
: Nx(Nx), Ny(Ny) {
for (int i = 0; i < N; i++)
this->D_ptrs[i] = D_ptrs[i];
}
template <int N>
KVectorArray3D<N>::KVectorArray3D(double *const (&D_ptrs)[N], int Nx, int Ny, int Nz)
: Nx(Nx), Ny(Ny), Nz(Nz) {
for (int i = 0; i < N; i++)
this->D_ptrs[i] = D_ptrs[i];
}
// ================ VectorArray1D Construction and move assignment ========================
template <int N> VectorArray1D<N>::VectorArray1D(int Nx) : Nx(Nx) {
double *H_ptr, *D_ptr;
check(cudaMallocHost(&H_ptr, sizeof(double) * Nx * N));
check(cudaMalloc(&D_ptr, sizeof(double) * Nx * N));
for (int i = 0; i < N; i++) {
H_ptrs[i] = H_ptr + Nx * i;
D_ptrs[i] = D_ptr + Nx * i;
}
}
template <int N>
VectorArray1D<N>::VectorArray1D(int Nx, std::function<Vector<N>(int)> f)
: VectorArray1D(Nx) {
for (int i = 0; i < Nx; i++)
this->operator()(i) = f(i);
check(cudaMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * N,
cudaMemcpyHostToDevice));
}
template <int N> VectorArray1D<N>::~VectorArray1D() {
check(cudaFreeHost(H_ptrs[0]));
check(cudaFree(D_ptrs[0]));
}
template <int N>
VectorArray1D<N>::VectorArray1D(VectorArray1D<N> &&rhs) noexcept : Nx(rhs.Nx) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
}
template <int N>
VectorArray1D<N> &VectorArray1D<N>::operator=(VectorArray1D<N> &&rhs) noexcept {
if (this != &rhs) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
Nx = rhs.Nx;
}
return *this;
}
// ================ VectorArray2D Construction and move assignment ========================
template <int N> VectorArray2D<N>::VectorArray2D(int Nx, int Ny) : Nx(Nx) ,Ny(Ny){
double *H_ptr, *D_ptr;
check(cudaMallocHost(&H_ptr, sizeof(double) * Nx * Ny * N));
check(cudaMalloc(&D_ptr, sizeof(double) * Nx * Ny * N));
for (int i = 0; i < N; i++) {
H_ptrs[i] = H_ptr + Nx * Ny * i;
D_ptrs[i] = D_ptr + Nx * Ny * i;
}
}
template <int N>
VectorArray2D<N>::VectorArray2D(int Nx, int Ny,
std::function<Vector<N>(int, int)> f)
: VectorArray2D(Nx, Ny) {
for (int j = 0; j < Ny; j++)
for (int i = 0; i < Nx; i++)
this->operator()(i, j) = f(i, j);
check(cudaMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * Ny * N,
cudaMemcpyHostToDevice));
}
template <int N> VectorArray2D<N>::~VectorArray2D() {
check(cudaFreeHost(H_ptrs[0]));
check(cudaFree(D_ptrs[0]));
}
template <int N>
VectorArray2D<N>::VectorArray2D(VectorArray2D<N> &&rhs) noexcept : Nx(rhs.Nx) , Ny(rhs.Ny){
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
}
template <int N>
VectorArray2D<N> &VectorArray2D<N>::operator=(VectorArray2D<N> &&rhs) noexcept {
if (this != &rhs) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
Nx = rhs.Nx;
Ny = rhs.Ny;
}
return *this;
}
// ================ VectorArray3D Construction and move assignment ========================
template <int N> VectorArray3D<N>::VectorArray3D(int Nx, int Ny, int Nz) : Nx(Nx) ,Ny(Ny), Nz(Nz){
double *H_ptr, *D_ptr;
check(cudaMallocHost(&H_ptr, sizeof(double) * Nx * Ny * Nz * N));
check(cudaMalloc(&D_ptr, sizeof(double) * Nx * Ny * Nz * N));
for (int i = 0; i < N; i++) {
H_ptrs[i] = H_ptr + Nx * Ny * Nz * i;
D_ptrs[i] = D_ptr + Nx * Ny * Nz * i;
}
}
template <int N>
VectorArray3D<N>::VectorArray3D(int Nx, int Ny, int Nz,
std::function<Vector<N>(int, int, int)> f)
: VectorArray3D(Nx, Ny, Nz) {
for (int k = 0; k < Nz; k++)
for (int j = 0; j < Ny; j++)
for (int i = 0; i < Nx; i++)
this->operator()(i, j, k) = f(i, j, k);
check(cudaMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * Ny * Nz * N,
cudaMemcpyHostToDevice));
}
template <int N> VectorArray3D<N>::~VectorArray3D() {
check(cudaFreeHost(H_ptrs[0]));
check(cudaFree(D_ptrs[0]));
}
template <int N>
VectorArray3D<N>::VectorArray3D(VectorArray3D<N> &&rhs) noexcept
: Nx(rhs.Nx), Ny(rhs.Ny), Nz(rhs.Nz) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
}
template <int N>
VectorArray3D<N> &VectorArray3D<N>::operator=(VectorArray3D<N> &&rhs) noexcept {
if (this != &rhs) {
for (int i = 0; i < N; i++) {
H_ptrs[i] = rhs.H_ptrs[i];
D_ptrs[i] = rhs.D_ptrs[i];
rhs.H_ptrs[i] = nullptr;
rhs.D_ptrs[i] = nullptr;
}
Nx = rhs.Nx;
Ny = rhs.Ny;
Nz = rhs.Nz;
}
return *this;
}
template <int N> void VectorArray1D<N>::DeviceToHost() {
check(cudaMemcpy(H_ptrs[0], D_ptrs[0], sizeof(double) * Nx * N,
cudaMemcpyDeviceToHost));
}
template <int N> void VectorArray1D<N>::HostToDevice() {
check(cudaMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * N,
cudaMemcpyHostToDevice));
}
template <int N> void VectorArray2D<N>::DeviceToHost() {
check(cudaMemcpy(H_ptrs[0], D_ptrs[0], sizeof(double) * Nx * Ny * N,
cudaMemcpyDeviceToHost));
}
template <int N> void VectorArray2D<N>::HostToDevice() {
check(cudaMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * Ny * N,
cudaMemcpyHostToDevice));
}
template <int N> void VectorArray3D<N>::DeviceToHost() {
check(cudaMemcpy(H_ptrs[0], D_ptrs[0], sizeof(double) * Nx * Ny * Nz * N,
cudaMemcpyDeviceToHost));
}
template <int N> void VectorArray3D<N>::HostToDevice() {
check(cudaMemcpy(D_ptrs[0], H_ptrs[0], sizeof(double) * Nx * Ny * Nz * N,
cudaMemcpyHostToDevice));
}
} // namespace soa
#include "instanciation.ipp"
|
967aecc84482136b2e9ad72c32a8f624ebe162a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// kernel function that adds two arrays
__global__ void vector_add(float *x, float *y, int n) {
    // compute the index - the equivalent of the for loop
    // threadIdx.x - the id of a thread within the current block
    // blockDim.x - the size of the current block
    // blockIdx.x - the id of the current block
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < n) {
x[i] = x[i] + y[i];
}
}
int main(void)
{
const int num_elements = 1 << 16;
const int num_bytes = num_elements * sizeof(float);
    float *host_array_x = 0, *host_array_y = 0; // arrays for the host (CPU)
    float *device_array_x = 0, *device_array_y = 0; // arrays for the device (GPU)
    // allocate memory on the host
host_array_x = (float *) malloc(num_bytes);
host_array_y = (float *) malloc(num_bytes);
    // allocate memory on the device
hipMalloc((void **) &device_array_x, num_bytes);
hipMalloc((void **) &device_array_y, num_bytes);
    // check whether the allocation succeeded
if (host_array_x == 0 || host_array_y == 0 || device_array_x == 0 || device_array_y == 0) {
printf("[HOST] Couldn't allocate memory\n");
return 0;
}
    // initialize x and y
for (int i = 0; i < num_elements; i++) {
host_array_x[i] = 4;
host_array_y[i] = 2;
}
    // do the host -> device transfer (CPU -> GPU)
hipMemcpy(device_array_x, host_array_x, num_bytes, hipMemcpyHostToDevice);
hipMemcpy(device_array_y, host_array_y, num_bytes, hipMemcpyHostToDevice);
    // set the block size (i.e. the number of threads in a block)
const size_t block_size = 256;
    // the number of blocks
size_t blocks_no = num_elements / block_size;
    // if there is a block that does not have size 256, increment the number of blocks
if (num_elements % block_size != 0) {
++blocks_no;
}
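    // the division above plus this adjustment is the usual ceiling division; it is
    // equivalent to: size_t blocks_no = (num_elements + block_size - 1) / block_size;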
hipLaunchKernelGGL(( vector_add), dim3(blocks_no), dim3(block_size), 0, 0, device_array_x, device_array_y, num_elements);
    // wait for the GPU threads to finish their work - the equivalent of pthread_join
    // so that we can then do the GPU -> CPU transfer
hipDeviceSynchronize();
    // the GPU -> CPU transfer (device -> host)
hipMemcpy(host_array_x, device_array_x, num_bytes, hipMemcpyDeviceToHost);
hipMemcpy(host_array_y, device_array_y, num_bytes, hipMemcpyDeviceToHost);
for (int i = 0; i < 10; ++i) {
printf("Result %d: %1.1f + %1.1f = %1.3f\n", i, host_array_x[i] - host_array_y[i],
host_array_y[i], host_array_x[i]);
}
    // free the host memory
free(host_array_x);
free(host_array_y);
    // free the device memory
hipFree(device_array_x);
hipFree(device_array_y);
return 0;
} | 967aecc84482136b2e9ad72c32a8f624ebe162a6.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
// kernel function that adds two arrays
__global__ void vector_add(float *x, float *y, int n) {
    // compute the index - the equivalent of the for loop
    // threadIdx.x - the id of a thread within the current block
    // blockDim.x - the size of the current block
    // blockIdx.x - the id of the current block
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < n) {
x[i] = x[i] + y[i];
}
}
int main(void)
{
const int num_elements = 1 << 16;
const int num_bytes = num_elements * sizeof(float);
    float *host_array_x = 0, *host_array_y = 0; // arrays for the host (CPU)
    float *device_array_x = 0, *device_array_y = 0; // arrays for the device (GPU)
    // allocate memory on the host
host_array_x = (float *) malloc(num_bytes);
host_array_y = (float *) malloc(num_bytes);
    // allocate memory on the device
cudaMalloc((void **) &device_array_x, num_bytes);
cudaMalloc((void **) &device_array_y, num_bytes);
    // check whether the allocation succeeded
if (host_array_x == 0 || host_array_y == 0 || device_array_x == 0 || device_array_y == 0) {
printf("[HOST] Couldn't allocate memory\n");
return 0;
}
    // initialize x and y
for (int i = 0; i < num_elements; i++) {
host_array_x[i] = 4;
host_array_y[i] = 2;
}
    // do the host -> device transfer (CPU -> GPU)
cudaMemcpy(device_array_x, host_array_x, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(device_array_y, host_array_y, num_bytes, cudaMemcpyHostToDevice);
    // set the block size (i.e. the number of threads in a block)
const size_t block_size = 256;
    // the number of blocks
size_t blocks_no = num_elements / block_size;
    // if there is a block that does not have size 256, increment the number of blocks
if (num_elements % block_size != 0) {
++blocks_no;
}
vector_add<<<blocks_no, block_size>>>(device_array_x, device_array_y, num_elements);
    // wait for the GPU threads to finish their work - the equivalent of pthread_join
    // so that we can then do the GPU -> CPU transfer
cudaDeviceSynchronize();
    // the GPU -> CPU transfer (device -> host)
cudaMemcpy(host_array_x, device_array_x, num_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(host_array_y, device_array_y, num_bytes, cudaMemcpyDeviceToHost);
for (int i = 0; i < 10; ++i) {
printf("Result %d: %1.1f + %1.1f = %1.3f\n", i, host_array_x[i] - host_array_y[i],
host_array_y[i], host_array_x[i]);
}
    // free the host memory
free(host_array_x);
free(host_array_y);
    // free the device memory
cudaFree(device_array_x);
cudaFree(device_array_y);
return 0;
} |
ab13fcdd1222fda3009c432517a18e03246a73d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ROWWISE_WEIGHTS_NORMS_LAYER_INSTANTIATE
#include "lbann/layers/misc/rowwise_weights_norms.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
/**
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (height/bdimx) x (width/bdimy) x 1
*/
template <typename T>
__global__ void row_sqsums_kernel(
size_t height,
size_t width,
const T* __restrict__ mat,
size_t mat_ldim,
T* __restrict__ row_sqsums) {
// Indices
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
// Accumulate sum of squares for each matrix row
for (size_t row=gidx; row<height; row+=nthreadsx) {
T sqsum{0};
for (size_t col=gidy; col<width; col+=nthreadsy) {
const auto& x = mat[row+col*mat_ldim];
sqsum += x*x;
}
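    // blocks at different y grid coordinates each hold only a partial sum for
    // this row, so the accumulation into row_sqsums must be atomic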
gpu_lib::atomic_add(&row_sqsums[row], sqsum);
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::row_sqsums(
const El::Matrix<TensorDataType, Device>& mat,
El::Matrix<TensorDataType, Device>& row_sqsums) {
// Launch kernel
El::Zero(row_sqsums);
if (!mat.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(row_sqsums),
gpu::get_sync_info(mat));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
block_dims.y = 1;
grid_dims.x = (mat.Height() + block_dims.x - 1) / block_dims.x;
grid_dims.y = (mat.Width()/64 + block_dims.y - 1) / block_dims.y;
grid_dims.y = El::Min(El::Max(grid_dims.y, 1), 65536);
hydrogen::gpu::LaunchKernel(
row_sqsums_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
static_cast<size_t>(mat.Height()),
static_cast<size_t>(mat.Width()),
mat.LockedBuffer(),
static_cast<size_t>(mat.LDim()),
row_sqsums.Buffer());
}
}
namespace {
/**
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (size/bdim) x 1 x 1
*/
template <typename T>
__global__ void sqrt_kernel(
size_t size,
T* __restrict__ buf) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t i=gid; i<size; i+=nthreads) {
auto& x = buf[i];
x = gpu_lib::sqrt(x);
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::sqrt(
El::Matrix<TensorDataType, Device>& mat) {
// Check that matrix is valid
if (!mat.Contiguous()) {
LBANN_ERROR("matrix is not contiguous");
}
// Launch kernel
if (!mat.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (mat.Height()*mat.Width() + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
sqrt_kernel<TensorDataType>,
grid_dims, block_dims, 0, gpu::get_sync_info(mat),
static_cast<size_t>(mat.Height()*mat.Width()),
mat.Buffer());
}
}
namespace {
/**
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (size/bdim) x 1 x 1
*/
template <typename T>
__global__ void divide_kernel(
size_t size,
T* __restrict__ numer,
const T* __restrict__ denom) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t i=gid; i<size; i+=nthreads) {
auto& x = numer[i];
const auto& y = denom[i];
const auto& z = x / y;
x = gpu_lib::isfinite(z) ? z : T{0};
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::divide(
El::Matrix<TensorDataType, Device>& numer,
const El::Matrix<TensorDataType, Device>& denom) {
// Check that matrices are valid
if (numer.Height() != denom.Height()
|| numer.Width() != denom.Width()) {
LBANN_ERROR("numerator and denominator do not have same dims");
}
if (!numer.Contiguous() || !denom.Contiguous()) {
LBANN_ERROR("matrices are not contiguous");
}
// Launch kernel
if (!numer.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(numer),
gpu::get_sync_info(denom));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (numer.Height()*numer.Width() + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
divide_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
numer.Height()*numer.Width(),
numer.Buffer(),
denom.LockedBuffer());
}
}
namespace {
template <typename T>
__global__ void row_axpy_kernel(
size_t height,
size_t width,
T alpha,
const T* __restrict__ a_vec,
const T* __restrict__ x_mat,
size_t x_ldim,
T beta,
T* __restrict__ y_mat,
size_t y_ldim) {
// Indices
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
// Accumulate sum of squares for each matrix row
for (size_t row=gidx; row<height; row+=nthreadsx) {
const auto& alpha_a = alpha * a_vec[row];
for (size_t col=gidy; col<width; col+=nthreadsy) {
const auto& x = x_mat[row+col*x_ldim];
auto& y = y_mat[row+col*y_ldim];
y = alpha_a * x + beta * y;
}
}
}
} // namespace <anon>
/**
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (height/bdimx) x (width/bdimy) x 1
*/
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::row_axpy(
TensorDataType alpha,
const El::Matrix<TensorDataType, Device>& a_vec,
const El::Matrix<TensorDataType, Device>& x_mat,
TensorDataType beta,
El::Matrix<TensorDataType, Device>& y_mat) {
// Check that matrices are valid
if (x_mat.Height() != y_mat.Height()
|| x_mat.Width() != y_mat.Width()
|| a_vec.Height() != y_mat.Height()
|| a_vec.Width() != 1) {
LBANN_ERROR("matrix dims do not match");
}
// Launch kernel
if (!y_mat.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(y_mat),
gpu::get_sync_info(a_vec),
gpu::get_sync_info(x_mat));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
    block_dims.y = 1;
grid_dims.x = (y_mat.Height() + block_dims.x - 1) / block_dims.x;
grid_dims.y = (y_mat.Width() + block_dims.y - 1) / block_dims.y;
grid_dims.y = El::Min(grid_dims.y, 65536);
hydrogen::gpu::LaunchKernel(
row_axpy_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
static_cast<size_t>(y_mat.Height()),
static_cast<size_t>(y_mat.Width()),
alpha,
a_vec.LockedBuffer(),
x_mat.LockedBuffer(),
static_cast<size_t>(x_mat.LDim()),
beta,
y_mat.Buffer(),
static_cast<size_t>(y_mat.LDim()));
}
}
#define PROTO(T) \
template class rowwise_weights_norms_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class rowwise_weights_norms_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| ab13fcdd1222fda3009c432517a18e03246a73d6.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ROWWISE_WEIGHTS_NORMS_LAYER_INSTANTIATE
#include "lbann/layers/misc/rowwise_weights_norms.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
/**
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (height/bdimx) x (width/bdimy) x 1
*/
template <typename T>
__global__ void row_sqsums_kernel(
size_t height,
size_t width,
const T* __restrict__ mat,
size_t mat_ldim,
T* __restrict__ row_sqsums) {
// Indices
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
// Accumulate sum of squares for each matrix row
for (size_t row=gidx; row<height; row+=nthreadsx) {
T sqsum{0};
for (size_t col=gidy; col<width; col+=nthreadsy) {
const auto& x = mat[row+col*mat_ldim];
sqsum += x*x;
}
gpu_lib::atomic_add(&row_sqsums[row], sqsum);
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::row_sqsums(
const El::Matrix<TensorDataType, Device>& mat,
El::Matrix<TensorDataType, Device>& row_sqsums) {
// Launch kernel
El::Zero(row_sqsums);
if (!mat.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(row_sqsums),
gpu::get_sync_info(mat));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
block_dims.y = 1;
grid_dims.x = (mat.Height() + block_dims.x - 1) / block_dims.x;
grid_dims.y = (mat.Width()/64 + block_dims.y - 1) / block_dims.y;
grid_dims.y = El::Min(El::Max(grid_dims.y, 1), 65536);
hydrogen::gpu::LaunchKernel(
row_sqsums_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
static_cast<size_t>(mat.Height()),
static_cast<size_t>(mat.Width()),
mat.LockedBuffer(),
static_cast<size_t>(mat.LDim()),
row_sqsums.Buffer());
}
}
namespace {
/**
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (size/bdim) x 1 x 1
*/
template <typename T>
__global__ void sqrt_kernel(
size_t size,
T* __restrict__ buf) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t i=gid; i<size; i+=nthreads) {
auto& x = buf[i];
x = gpu_lib::sqrt(x);
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::sqrt(
El::Matrix<TensorDataType, Device>& mat) {
// Check that matrix is valid
if (!mat.Contiguous()) {
LBANN_ERROR("matrix is not contiguous");
}
// Launch kernel
if (!mat.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (mat.Height()*mat.Width() + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
sqrt_kernel<TensorDataType>,
grid_dims, block_dims, 0, gpu::get_sync_info(mat),
static_cast<size_t>(mat.Height()*mat.Width()),
mat.Buffer());
}
}
namespace {
/**
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (size/bdim) x 1 x 1
*/
template <typename T>
__global__ void divide_kernel(
size_t size,
T* __restrict__ numer,
const T* __restrict__ denom) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t i=gid; i<size; i+=nthreads) {
auto& x = numer[i];
const auto& y = denom[i];
const auto& z = x / y;
x = gpu_lib::isfinite(z) ? z : T{0};
}
}
} // namespace <anon>
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::divide(
El::Matrix<TensorDataType, Device>& numer,
const El::Matrix<TensorDataType, Device>& denom) {
// Check that matrices are valid
if (numer.Height() != denom.Height()
|| numer.Width() != denom.Width()) {
LBANN_ERROR("numerator and denominator do not have same dims");
}
if (!numer.Contiguous() || !denom.Contiguous()) {
LBANN_ERROR("matrices are not contiguous");
}
// Launch kernel
if (!numer.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(numer),
gpu::get_sync_info(denom));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (numer.Height()*numer.Width() + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
divide_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
numer.Height()*numer.Width(),
numer.Buffer(),
denom.LockedBuffer());
}
}
namespace {
template <typename T>
__global__ void row_axpy_kernel(
size_t height,
size_t width,
T alpha,
const T* __restrict__ a_vec,
const T* __restrict__ x_mat,
size_t x_ldim,
T beta,
T* __restrict__ y_mat,
size_t y_ldim) {
// Indices
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
// Accumulate sum of squares for each matrix row
for (size_t row=gidx; row<height; row+=nthreadsx) {
const auto& alpha_a = alpha * a_vec[row];
for (size_t col=gidy; col<width; col+=nthreadsy) {
const auto& x = x_mat[row+col*x_ldim];
auto& y = y_mat[row+col*y_ldim];
y = alpha_a * x + beta * y;
}
}
}
} // namespace <anon>
/**
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (height/bdimx) x (width/bdimy) x 1
*/
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::row_axpy(
TensorDataType alpha,
const El::Matrix<TensorDataType, Device>& a_vec,
const El::Matrix<TensorDataType, Device>& x_mat,
TensorDataType beta,
El::Matrix<TensorDataType, Device>& y_mat) {
// Check that matrices are valid
if (x_mat.Height() != y_mat.Height()
|| x_mat.Width() != y_mat.Width()
|| a_vec.Height() != y_mat.Height()
|| a_vec.Width() != 1) {
LBANN_ERROR("matrix dims do not match");
}
// Launch kernel
if (!y_mat.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(y_mat),
gpu::get_sync_info(a_vec),
gpu::get_sync_info(x_mat));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
    block_dims.y = 1;
grid_dims.x = (y_mat.Height() + block_dims.x - 1) / block_dims.x;
grid_dims.y = (y_mat.Width() + block_dims.y - 1) / block_dims.y;
grid_dims.y = El::Min(grid_dims.y, 65536);
hydrogen::gpu::LaunchKernel(
row_axpy_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
static_cast<size_t>(y_mat.Height()),
static_cast<size_t>(y_mat.Width()),
alpha,
a_vec.LockedBuffer(),
x_mat.LockedBuffer(),
static_cast<size_t>(x_mat.LDim()),
beta,
y_mat.Buffer(),
static_cast<size_t>(y_mat.LDim()));
}
}
#define PROTO(T) \
template class rowwise_weights_norms_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class rowwise_weights_norms_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
245f0ecf24f846079c8a65a60a860c08c7399d1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <omp.h>
#include <math.h> /* fabsf */
#include <string.h>
#include <stdlib.h>
#include <time.h>
#define DEBUG 0
//Error check-----
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//Error check-----
//It is a very good idea to wrap your calls with this function; otherwise you will not be able to see what the error is.
//Moreover, you may also want to look at how to use cuda-memcheck and cuda-gdb for debugging.
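//Typical usage: wrap every runtime call, e.g. gpuErrchk(hipMalloc(...)), and call
//gpuErrchk(hipPeekAtLastError()) right after a kernel launch, as done in wrapper() below.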
__global__ void calculateError(int* xadj, int* adj, double* rv, double* cv, double* maxErr, int maxOperation) {
// Get idx for each thread
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < maxOperation) {
int starti = xadj[i], endi = xadj[i+1];
double err = 0;
for (int j = starti; j < endi; j++) err += rv[i] * cv[adj[j]];
err = fabs(1-err);
if (err > *maxErr) *maxErr = err;
}
}
__global__ void scaleskRV(int* xadj, int* adj, double* rv, double* cv, int maxOperation) {
// Get idx for each thread
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < maxOperation) {
int starti = xadj[i], endi = xadj[i+1];
double rowSum = 0;
for (int j = starti; j < endi; j++) rowSum += cv[adj[j]];
rv[i] = 1 / rowSum;
}
}
__global__ void scaleskCV(int* txadj, int* tadj, double* rv, double* cv, int maxOperation) {
// Get idx for each thread
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < maxOperation) {
int starti = txadj[i], endi = txadj[i+1];
double colSum = 0;
for (int j = starti; j < endi; j++) colSum += rv[tadj[j]];
cv[i] = 1 / colSum;
}
}
void wrapper(int* adj, int* xadj, int* tadj, int* txadj, double* rv, double* cv, int* nov, int* nnz, int siter){
// Initialize rv and cv
for (int i = 0; i < *nov; i++) rv[i] = cv[i] = 1;
// Transfer data from host to device.
int NO_THREADS = 1024;
int NO_BLOCKS = (*nov + NO_THREADS - 1)/NO_THREADS;
int maxOperation = (*nov) - 1;
std::cout << "NO_BLOCKS " << NO_BLOCKS << std::endl;
std::cout << "NO_THREADS " << NO_THREADS << std::endl;
std::cout << "NO_THREADS * NO_BLOCKS " << NO_THREADS * NO_BLOCKS << std::endl;
std::cout << "maxOperation " << maxOperation << std::endl;
std::cout << "no of one " << xadj[*nov] << std::endl;
int* adj_d, *xadj_d, *tadj_d, *txadj_d;
gpuErrchk(hipMalloc( (void**) &adj_d, (*nnz) * sizeof(int)));
gpuErrchk(hipMemcpy(adj_d, adj, (*nnz) * sizeof(int), hipMemcpyHostToDevice ));
gpuErrchk(hipMalloc( (void**) &xadj_d, (*nov) * sizeof(int)));
gpuErrchk(hipMemcpy(xadj_d, xadj, (*nov) * sizeof(int), hipMemcpyHostToDevice ));
gpuErrchk(hipMalloc( (void**) &tadj_d, (*nnz) * sizeof(int)));
gpuErrchk(hipMemcpy(tadj_d, tadj,(*nnz) * sizeof(int), hipMemcpyHostToDevice ));
gpuErrchk(hipMalloc( (void**) &txadj_d, (*nov) * sizeof(int)));
gpuErrchk(hipMemcpy(txadj_d, txadj,(*nov) * sizeof(int), hipMemcpyHostToDevice ));
double* rv_d, *cv_d;
gpuErrchk(hipMalloc( (void**) &rv_d, (*nov) * sizeof(double)));
gpuErrchk(hipMemcpy(rv_d, rv, (*nov) * sizeof(double), hipMemcpyHostToDevice ));
gpuErrchk(hipMalloc( (void**) &cv_d, (*nov) * sizeof(double)));
gpuErrchk(hipMemcpy(cv_d, cv, (*nov) * sizeof(double), hipMemcpyHostToDevice ));
double* err_d;
double* err = new double(0);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventRecord(start, 0);
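    // alternately recompute the row scale factors rv (so that each scaled row sums
    // to 1 given the current cv) and the column scale factors cv, then take the
    // largest row error |1 - sum_j rv[i]*cv[j]| over the nonzeros as a convergence indicator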
for (int i = 0; i < siter; i++) {
// Fill rv
hipLaunchKernelGGL(( scaleskRV), dim3(NO_BLOCKS), dim3(NO_THREADS), 0, 0, xadj_d, adj_d, rv_d, cv_d, maxOperation);
gpuErrchk(hipPeekAtLastError());
// Fill cv
hipLaunchKernelGGL(( scaleskCV), dim3(NO_BLOCKS), dim3(NO_THREADS), 0, 0, txadj_d, tadj_d, rv_d, cv_d, maxOperation);
gpuErrchk(hipPeekAtLastError());
// calculate error
        gpuErrchk(hipMalloc((void**) &err_d, sizeof(double)));
        gpuErrchk(hipMemset(err_d, 0, sizeof(double))); // the kernel compares against *maxErr, so it must start at 0
hipLaunchKernelGGL(( calculateError), dim3(NO_BLOCKS), dim3(NO_THREADS), 0, 0, xadj_d, adj_d, rv_d, cv_d, err_d, maxOperation);
// get error from device
gpuErrchk(hipMemcpy(err, err_d, sizeof(double), hipMemcpyDeviceToHost));
gpuErrchk(hipFree(err_d));
std::cout << "iter " << i << " - error: " << *err << std::endl;
}
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU scale took: %f s\n", elapsedTime/1000);
gpuErrchk(hipFree(xadj_d));
gpuErrchk(hipFree(adj_d));
gpuErrchk(hipFree(txadj_d));
gpuErrchk(hipFree(tadj_d));
gpuErrchk(hipFree(rv_d));
gpuErrchk(hipFree(cv_d));
}
| 245f0ecf24f846079c8a65a60a860c08c7399d1e.cu | #include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <omp.h>
#include <math.h> /* fabsf */
#include <string.h>
#include <stdlib.h>
#include <time.h>
#define DEBUG 0
//Error check-----
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//Error check-----
//It is a very good idea to wrap your calls with this function; otherwise you will not be able to see what the error is.
//Moreover, you may also want to look at how to use cuda-memcheck and cuda-gdb for debugging.
__global__ void calculateError(int* xadj, int* adj, double* rv, double* cv, double* maxErr, int maxOperation) {
// Get idx for each thread
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < maxOperation) {
int starti = xadj[i], endi = xadj[i+1];
double err = 0;
for (int j = starti; j < endi; j++) err += rv[i] * cv[adj[j]];
err = fabs(1-err);
if (err > *maxErr) *maxErr = err;
}
}
__global__ void scaleskRV(int* xadj, int* adj, double* rv, double* cv, int maxOperation) {
// Get idx for each thread
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < maxOperation) {
int starti = xadj[i], endi = xadj[i+1];
double rowSum = 0;
for (int j = starti; j < endi; j++) rowSum += cv[adj[j]];
rv[i] = 1 / rowSum;
}
}
__global__ void scaleskCV(int* txadj, int* tadj, double* rv, double* cv, int maxOperation) {
// Get idx for each thread
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < maxOperation) {
int starti = txadj[i], endi = txadj[i+1];
double colSum = 0;
for (int j = starti; j < endi; j++) colSum += rv[tadj[j]];
cv[i] = 1 / colSum;
}
}
void wrapper(int* adj, int* xadj, int* tadj, int* txadj, double* rv, double* cv, int* nov, int* nnz, int siter){
// Initialize rv and cv
for (int i = 0; i < *nov; i++) rv[i] = cv[i] = 1;
// Transfer data from host to device.
int NO_THREADS = 1024;
int NO_BLOCKS = (*nov + NO_THREADS - 1)/NO_THREADS;
int maxOperation = (*nov) - 1;
std::cout << "NO_BLOCKS " << NO_BLOCKS << std::endl;
std::cout << "NO_THREADS " << NO_THREADS << std::endl;
std::cout << "NO_THREADS * NO_BLOCKS " << NO_THREADS * NO_BLOCKS << std::endl;
std::cout << "maxOperation " << maxOperation << std::endl;
std::cout << "no of one " << xadj[*nov] << std::endl;
int* adj_d, *xadj_d, *tadj_d, *txadj_d;
gpuErrchk(cudaMalloc( (void**) &adj_d, (*nnz) * sizeof(int)));
gpuErrchk(cudaMemcpy(adj_d, adj, (*nnz) * sizeof(int), cudaMemcpyHostToDevice ));
gpuErrchk(cudaMalloc( (void**) &xadj_d, (*nov) * sizeof(int)));
gpuErrchk(cudaMemcpy(xadj_d, xadj, (*nov) * sizeof(int), cudaMemcpyHostToDevice ));
gpuErrchk(cudaMalloc( (void**) &tadj_d, (*nnz) * sizeof(int)));
gpuErrchk(cudaMemcpy(tadj_d, tadj,(*nnz) * sizeof(int), cudaMemcpyHostToDevice ));
gpuErrchk(cudaMalloc( (void**) &txadj_d, (*nov) * sizeof(int)));
gpuErrchk(cudaMemcpy(txadj_d, txadj,(*nov) * sizeof(int), cudaMemcpyHostToDevice ));
double* rv_d, *cv_d;
gpuErrchk(cudaMalloc( (void**) &rv_d, (*nov) * sizeof(double)));
gpuErrchk(cudaMemcpy(rv_d, rv, (*nov) * sizeof(double), cudaMemcpyHostToDevice ));
gpuErrchk(cudaMalloc( (void**) &cv_d, (*nov) * sizeof(double)));
gpuErrchk(cudaMemcpy(cv_d, cv, (*nov) * sizeof(double), cudaMemcpyHostToDevice ));
double* err_d;
double* err = new double(0);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
for (int i = 0; i < siter; i++) {
// Fill rv
scaleskRV<<<NO_BLOCKS, NO_THREADS>>>(xadj_d, adj_d, rv_d, cv_d, maxOperation);
gpuErrchk(cudaPeekAtLastError());
// Fill cv
scaleskCV<<<NO_BLOCKS, NO_THREADS>>>(txadj_d, tadj_d, rv_d, cv_d, maxOperation);
gpuErrchk(cudaPeekAtLastError());
// calculate error
        gpuErrchk(cudaMalloc((void**) &err_d, sizeof(double)));
        gpuErrchk(cudaMemset(err_d, 0, sizeof(double))); // the kernel compares against *maxErr, so it must start at 0
calculateError<<<NO_BLOCKS, NO_THREADS>>>(xadj_d, adj_d, rv_d, cv_d, err_d, maxOperation);
// get error from device
gpuErrchk(cudaMemcpy(err, err_d, sizeof(double), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(err_d));
std::cout << "iter " << i << " - error: " << *err << std::endl;
}
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU scale took: %f s\n", elapsedTime/1000);
gpuErrchk(cudaFree(xadj_d));
gpuErrchk(cudaFree(adj_d));
gpuErrchk(cudaFree(txadj_d));
gpuErrchk(cudaFree(tadj_d));
gpuErrchk(cudaFree(rv_d));
gpuErrchk(cudaFree(cv_d));
}
|
90f07d34058edf24736f789e06308a269198fdf2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "matrix.hh"
#include "nn_exception.hh"
//using ":" with initialization lists to initialize the fields
Matrix::Matrix(size_t x_dim, size_t y_dim) :
shape(x_dim, y_dim), data_device(nullptr), data_host(nullptr),
device_allocated(false), host_allocated(false)
{ }
Matrix::Matrix(Shape shape) :
Matrix(shape.x, shape.y)
{ }
void Matrix::allocateCudaMemory() {
if (!device_allocated) {
float* device_memory = nullptr;
hipMalloc(&device_memory, shape.x * shape.y * sizeof(float));
NNException::throwIfDeviceErrorsOccurred("Cannot allocate CUDA memory for Tensor3D.");
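        // hand the raw device pointer to a shared_ptr with a custom deleter so that
        // hipFree runs automatically when the last reference to it goes away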
data_device = std::shared_ptr<float>(device_memory,
[&](float* ptr){ hipFree(ptr); });
device_allocated = true;
}
}
void Matrix::allocateHostMemory() {
if (!host_allocated) {
        // passing a pointer to allocated memory to shared_ptr: a smart pointer will by default call the delete operator, so we need to pass a second argument that tells it how to perform the deallocation (here delete[])
data_host = std::shared_ptr<float>(new float[shape.x * shape.y],
[&](float* ptr){ delete[] ptr; });
host_allocated = true;
}
}
void Matrix::allocateMemory() {
allocateCudaMemory();
allocateHostMemory();
}
void Matrix::allocateMemoryIfNotAllocated(Shape shape) {
if (!device_allocated && !host_allocated) {
this->shape = shape;
allocateMemory();
}
}
void Matrix::copyHostToDevice() {
if (device_allocated && host_allocated) {
hipMemcpy(data_device.get(), data_host.get(), shape.x * shape.y * sizeof(float), hipMemcpyHostToDevice);
NNException::throwIfDeviceErrorsOccurred("Cannot copy host data to CUDA device.");
}
else {
throw NNException("Cannot copy host data to not allocated memory on device.");
}
}
void Matrix::copyDeviceToHost() {
if (device_allocated && host_allocated) {
hipMemcpy(data_host.get(), data_device.get(), shape.x * shape.y * sizeof(float), hipMemcpyDeviceToHost);
NNException::throwIfDeviceErrorsOccurred("Cannot copy device data to host.");
}
else {
throw NNException("Cannot copy device data to not allocated memory on host.");
}
}
float& Matrix::operator[](const int index) {
return data_host.get()[index];
}
const float& Matrix::operator[](const int index) const {
return data_host.get()[index];
}
| 90f07d34058edf24736f789e06308a269198fdf2.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "matrix.hh"
#include "nn_exception.hh"
//using ":" with initialization lists to initialize the fields
Matrix::Matrix(size_t x_dim, size_t y_dim) :
shape(x_dim, y_dim), data_device(nullptr), data_host(nullptr),
device_allocated(false), host_allocated(false)
{ }
Matrix::Matrix(Shape shape) :
Matrix(shape.x, shape.y)
{ }
void Matrix::allocateCudaMemory() {
if (!device_allocated) {
float* device_memory = nullptr;
cudaMalloc(&device_memory, shape.x * shape.y * sizeof(float));
NNException::throwIfDeviceErrorsOccurred("Cannot allocate CUDA memory for Tensor3D.");
data_device = std::shared_ptr<float>(device_memory,
[&](float* ptr){ cudaFree(ptr); });
device_allocated = true;
}
}
void Matrix::allocateHostMemory() {
if (!host_allocated) {
        // passing a pointer to allocated memory to shared_ptr: a smart pointer will by default call the delete operator, so we need to pass a second argument that tells it how to perform the deallocation (here delete[])
data_host = std::shared_ptr<float>(new float[shape.x * shape.y],
[&](float* ptr){ delete[] ptr; });
host_allocated = true;
}
}
void Matrix::allocateMemory() {
allocateCudaMemory();
allocateHostMemory();
}
void Matrix::allocateMemoryIfNotAllocated(Shape shape) {
if (!device_allocated && !host_allocated) {
this->shape = shape;
allocateMemory();
}
}
void Matrix::copyHostToDevice() {
if (device_allocated && host_allocated) {
cudaMemcpy(data_device.get(), data_host.get(), shape.x * shape.y * sizeof(float), cudaMemcpyHostToDevice);
NNException::throwIfDeviceErrorsOccurred("Cannot copy host data to CUDA device.");
}
else {
throw NNException("Cannot copy host data to not allocated memory on device.");
}
}
void Matrix::copyDeviceToHost() {
if (device_allocated && host_allocated) {
cudaMemcpy(data_host.get(), data_device.get(), shape.x * shape.y * sizeof(float), cudaMemcpyDeviceToHost);
NNException::throwIfDeviceErrorsOccurred("Cannot copy device data to host.");
}
else {
throw NNException("Cannot copy device data to not allocated memory on host.");
}
}
float& Matrix::operator[](const int index) {
return data_host.get()[index];
}
const float& Matrix::operator[](const int index) const {
return data_host.get()[index];
}
|
4c38acfaeed5a1b428b720c42d2ff068ed1609ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
timer().startGpuTimer();
// TODO
timer().endGpuTimer();
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
timer().startGpuTimer();
// TODO
timer().endGpuTimer();
return -1;
}
}
}
| 4c38acfaeed5a1b428b720c42d2ff068ed1609ce.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
timer().startGpuTimer();
// TODO
timer().endGpuTimer();
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
timer().startGpuTimer();
// TODO
timer().endGpuTimer();
return -1;
}
}
}
|
b08e6087b867e68ca036b10defbf0238544a6c62.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__ void incr(int *ptr)
{
/*int temp = *ptr;
temp = temp + 1;
*ptr = temp;*/
atomicAdd(ptr,1);
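    // the commented-out read-modify-write above is a data race once more than one
    // thread runs the kernel (the commented-out launch below uses 32 threads):
    // threads may read the same old value and increments get lost; atomicAdd makes
    // the update safe, so the final value equals the number of threads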
}
//int main()
//{
// int value = 0;
// int SIZE = sizeof(int);
// int ref = -1;
//
// int *d_val;
// hipMalloc((void**)&d_val, SIZE);
// hipMemcpy(d_val, &value, SIZE, hipMemcpyHostToDevice);
// incr << <1, 32 >> > (d_val);
// hipDeviceSynchronize();
// hipMemcpy(&ref,d_val,SIZE, hipMemcpyDeviceToHost);
//
// printf("Updated value : %d \n",ref);
//
// hipDeviceReset();
// return 0;
//} | b08e6087b867e68ca036b10defbf0238544a6c62.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void incr(int *ptr)
{
/*int temp = *ptr;
temp = temp + 1;
*ptr = temp;*/
atomicAdd(ptr,1);
}
//int main()
//{
// int value = 0;
// int SIZE = sizeof(int);
// int ref = -1;
//
// int *d_val;
// cudaMalloc((void**)&d_val, SIZE);
// cudaMemcpy(d_val, &value, SIZE, cudaMemcpyHostToDevice);
// incr << <1, 32 >> > (d_val);
// cudaDeviceSynchronize();
// cudaMemcpy(&ref,d_val,SIZE, cudaMemcpyDeviceToHost);
//
// printf("Updated value : %d \n",ref);
//
// cudaDeviceReset();
// return 0;
//} |
d9967119c5af56674be948110d22a928ac4a8bf7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//page size is 32bytes
#define PAGESIZE 32
//32 KB in shared memory
#define PHYSICAL_MEM_SIZE 32768
//128 KB in global memory
#define STORAGE_SIZE 131072
#define DATAFILE "./data.bin"
#define OUTFILE "./snapshot.bin"
typedef unsigned char uchar;
typedef uint32_t u32;
//page table entries
__device__ __managed__ int PAGE_ENTRIES = 0;
//count the pagefault times
__device__ __managed__ int PAGEFAULT_NUM = 0;
//__shared__ int PAGEFAULT_NUM;
//secondary memory
__device__ __managed__ uchar storage[STORAGE_SIZE];
//data input and output
__device__ __managed__ uchar results[STORAGE_SIZE];
__device__ __managed__ uchar input[STORAGE_SIZE];
//page table
extern __shared__ u32 pt[];
__device__ void init_pageTable(int entries)
{
PAGE_ENTRIES = entries;
for (int i = 0; i < entries; i++)
{
pt[i] = 0x80000000; //invalid
pt[i + PAGE_ENTRIES] = i;
}
}
__device__ uchar Gread(uchar *buffer, u32 addr)
{
    /* Complete Gread function to read value from data buffer */
}
__device__ void Gwrite(uchar *buffer, u32 addr, uchar value)
{
/* Complete Gwrite function to write value to data buffer */
}
__device__ void snapshot(uchar *results, uchar* buffer, int offset, int input_size)
{
/* Complete snapshot function to load elements from data to result */
}
__global__ void mykernel(int input_size)
{
//take shared memory as physical memory
__shared__ uchar data[PHYSICAL_MEM_SIZE];
//get page table entries
int pt_entries = PHYSICAL_MEM_SIZE / PAGESIZE;
//before first Gwrite or Gread
init_pageTable(pt_entries);
for (int i = 0; i < input_size; i++)
Gwrite(data, i, input[i]);
for (int i = input_size - 1; i >= input_size - 32769; i--)
int value = Gread(data, i);
snapshot(results, data, 0, input_size);
}
__host__ void write_binaryFile(char *fileName, void *buffer, int bufferSize)
{
FILE *fp;
fp = fopen(fileName, "wb");
fwrite(buffer, 1, bufferSize, fp);
fclose(fp);
}
__host__ int load_binaryFile(char *fileName, void *buffer, int bufferSize)
{
FILE *fp;
fp = fopen(fileName, "rb");
if (!fp)
{
printf("***Unable to open file %s***\n", fileName);
exit(1);
}
//Get file length
fseek(fp, 0, SEEK_END);
int fileLen = ftell(fp);
fseek(fp, 0, SEEK_SET);
//printf("fileLen: %ld\n", fileLen);
if (fileLen > bufferSize)
{
printf("****invalid testcase!!****\n");
        printf("****software warning: the file: %s size****\n", fileName);
printf("****is greater than buffer size****\n");
exit(1);
}
//Read file contents into buffer
fread(buffer, fileLen, 1, fp);
fclose(fp);
return fileLen;
}
int main()
{
hipError_t cudaStatus;
int input_size = load_binaryFile(DATAFILE, input, STORAGE_SIZE);
mykernel << <1, 1, 16384 >> > (input_size);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "mykernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return 0;
}
printf("input size: %d\n", input_size);
hipDeviceSynchronize();
hipDeviceReset();
write_binaryFile(OUTFILE, results, input_size);
printf("pagefault number is %d\n", PAGEFAULT_NUM);
return 0;
} | d9967119c5af56674be948110d22a928ac4a8bf7.cu | #include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <cuda.h>
//page size is 32bytes
#define PAGESIZE 32
//32 KB in shared memory
#define PHYSICAL_MEM_SIZE 32768
//128 KB in global memory
#define STORAGE_SIZE 131072
#define DATAFILE "./data.bin"
#define OUTFILE "./snapshot.bin"
typedef unsigned char uchar;
typedef uint32_t u32;
//page table entries
__device__ __managed__ int PAGE_ENTRIES = 0;
//count the pagefault times
__device__ __managed__ int PAGEFAULT_NUM = 0;
//__shared__ int PAGEFAULT_NUM;
//secondary memory
__device__ __managed__ uchar storage[STORAGE_SIZE];
//data input and output
__device__ __managed__ uchar results[STORAGE_SIZE];
__device__ __managed__ uchar input[STORAGE_SIZE];
//page table
extern __shared__ u32 pt[];
__device__ void init_pageTable(int entries)
{
PAGE_ENTRIES = entries;
for (int i = 0; i < entries; i++)
{
pt[i] = 0x80000000; //invalid
pt[i + PAGE_ENTRIES] = i;
}
}
__device__ uchar Gread(uchar *buffer, u32 addr)
{
    /* Complete Gread function to read value from data buffer */
}
__device__ void Gwrite(uchar *buffer, u32 addr, uchar value)
{
/* Complete Gwrite function to write value to data buffer */
}
__device__ void snapshot(uchar *results, uchar* buffer, int offset, int input_size)
{
/* Complete snapshot function to load elements from data to result */
}
__global__ void mykernel(int input_size)
{
//take shared memory as physical memory
__shared__ uchar data[PHYSICAL_MEM_SIZE];
//get page table entries
int pt_entries = PHYSICAL_MEM_SIZE / PAGESIZE;
//before first Gwrite or Gread
init_pageTable(pt_entries);
for (int i = 0; i < input_size; i++)
Gwrite(data, i, input[i]);
for (int i = input_size - 1; i >= input_size - 32769; i--)
int value = Gread(data, i);
snapshot(results, data, 0, input_size);
}
__host__ void write_binaryFile(char *fileName, void *buffer, int bufferSize)
{
FILE *fp;
fp = fopen(fileName, "wb");
fwrite(buffer, 1, bufferSize, fp);
fclose(fp);
}
__host__ int load_binaryFile(char *fileName, void *buffer, int bufferSize)
{
FILE *fp;
fp = fopen(fileName, "rb");
if (!fp)
{
printf("***Unable to open file %s***\n", fileName);
exit(1);
}
//Get file length
fseek(fp, 0, SEEK_END);
int fileLen = ftell(fp);
fseek(fp, 0, SEEK_SET);
//printf("fileLen: %ld\n", fileLen);
if (fileLen > bufferSize)
{
printf("****invalid testcase!!****\n");
        printf("****software warning: the file: %s size****\n", fileName);
printf("****is greater than buffer size****\n");
exit(1);
}
//Read file contents into buffer
fread(buffer, fileLen, 1, fp);
fclose(fp);
return fileLen;
}
int main()
{
cudaError_t cudaStatus;
int input_size = load_binaryFile(DATAFILE, input, STORAGE_SIZE);
mykernel << <1, 1, 16384 >> > (input_size);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "mykernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return 0;
}
printf("input size: %d\n", input_size);
cudaDeviceSynchronize();
cudaDeviceReset();
write_binaryFile(OUTFILE, results, input_size);
printf("pagefault number is %d\n", PAGEFAULT_NUM);
return 0;
} |
cee911bc7c0bc47cbb548346814d258fe6d15dea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sdtbs_cu.h"
#include <pthread.h>
#include <unistd.h>
__device__ tbs_type_t d_tbs_type;
__device__ skrun_t *d_skruns;
__device__ unsigned *d_mtbs_done_cnts;
static skrun_t *g_skruns;
static unsigned *g_mtbs_done_cnts;
static unsigned *info_n_mtbs;
static BOOL *skrun_dones;
static unsigned skrid_done_min;
static unsigned cur_skrid_host;
static BOOL checker_done;
static pthread_t checker;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static hipStream_t strm_submit;
#define SK_PROTO(name) __device__ int name(void *args[])
#define SK_FUNCS(base) SK_PROTO(base);
SK_FUNCS(loopcalc)
SK_FUNCS(mklc)
SK_FUNCS(gma)
SK_FUNCS(lma)
SK_FUNCS(kmeans)
static __device__ int
run_sub_kernel_func(skid_t skid, void *args[])
{
switch (skid) {
case LOOPCALC:
return loopcalc(args);
case MKLC:
return mklc(args);
case GMA:
return gma(args);
case LMA:
return lma(args);
case KMEANS:
return kmeans(args);
default:
return 0;
}
}
__device__ void
run_sub_kernel(skrid_t skrid)
{
skrun_t *skr;
int res;
skr = &d_skruns[skrid - 1];
res = run_sub_kernel_func(skr->skid, skr->args);
if (get_threadIdxX() == 0)
skr->res = res;
}
__global__ void
sub_kernel_func(skrid_t skrid)
{
run_sub_kernel(skrid);
}
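// submit_skrun: under the mutex, take the next skrun slot, remember how many mTBs
// the run will produce, and copy the run descriptor into the device-side skrun
// array (g_skruns) over the dedicated submit stream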
static skrid_t
submit_skrun(skid_t skid, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrid_t skrid;
skrun_t skrun;
skrun.skid = skid;
skrun.dimGrid = dimGrid;
skrun.dimBlock = dimBlock;
memcpy(skrun.args, args, sizeof(void *) * MAX_ARGS);
skrun.res = 0;
skrun.n_tbs = dimGrid.x * dimGrid.y;
skrun.n_mtbs_per_tb = dimBlock.x * dimBlock.y / N_THREADS_PER_mTB;
pthread_mutex_lock(&mutex);
skrid = cur_skrid_host + 1;
info_n_mtbs[skrid - 1] = skrun.n_tbs * skrun.n_mtbs_per_tb;
hipMemcpyAsync(g_skruns + cur_skrid_host, &skrun, sizeof(skrun_t), hipMemcpyHostToDevice, strm_submit);
hipStreamSynchronize(strm_submit);
cur_skrid_host++;
pthread_mutex_unlock(&mutex);
return skrid;
}
skrid_t
launch_kernel(skid_t skid, hipStream_t strm, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrid_t skrid;
skrid = submit_skrun(skid, dimGrid, dimBlock, args);
if (sched->type == TBS_TYPE_HW)
hipLaunchKernelGGL(( sub_kernel_func), dim3(dimGrid), dim3(dimBlock), 0, strm, skrid);
return skrid;
}
static void
wait_skrun(skrid_t skrid)
{
pthread_mutex_lock(&mutex);
while (!checker_done && !skrun_dones[skrid - 1])
pthread_cond_wait(&cond, &mutex);
pthread_mutex_unlock(&mutex);
}
void
wait_kernel(skrid_t skrid, hipStream_t strm, int *pres)
{
skrun_t *skr;
int res;
if (sched->type == TBS_TYPE_HW)
hipStreamSynchronize(strm);
else
wait_skrun(skrid);
skr = g_skruns + (skrid - 1);
hipMemcpyAsync(&res, &skr->res, sizeof(int), hipMemcpyDeviceToHost, strm);
hipStreamSynchronize(strm);
*pres = res;
}
static void
notify_done_skruns(unsigned *mtbs_done_cnts, unsigned n_checks)
{
unsigned min_new = skrid_done_min;
BOOL notify = FALSE;
unsigned i;
pthread_mutex_lock(&mutex);
for (i = 0; i < n_checks; i++) {
if (skrun_dones[i + skrid_done_min])
continue;
if (mtbs_done_cnts[i] == info_n_mtbs[i + skrid_done_min]) {
notify = TRUE;
skrun_dones[i + skrid_done_min] = TRUE;
if (min_new == i + skrid_done_min) {
min_new++;
}
}
}
skrid_done_min = min_new;
if (notify)
pthread_cond_broadcast(&cond);
pthread_mutex_unlock(&mutex);
}
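// checker thread: periodically copies the per-kernel mTB done counters back from
// the device and, via notify_done_skruns(), marks finished skruns and wakes any
// host thread blocked in wait_skrun()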
static void *
skruns_checkfunc(void *arg)
{
hipStream_t strm;
hipStreamCreate(&strm);
while (!checker_done) {
unsigned n_checks = cur_skrid_host - skrid_done_min;
if (n_checks > 0) {
unsigned *mtbs_done_cnts = (unsigned *)malloc(sizeof(unsigned) * n_checks);
hipMemcpyAsync(mtbs_done_cnts, g_mtbs_done_cnts + skrid_done_min, sizeof(unsigned) * n_checks, hipMemcpyDeviceToHost, strm);
hipStreamSynchronize(strm);
notify_done_skruns(mtbs_done_cnts, n_checks);
free(mtbs_done_cnts);
}
usleep(100);
}
hipStreamDestroy(strm);
return NULL;
}
__global__ void
kernel_init_skrun(tbs_type_t type, skrun_t *skruns, unsigned *mtbs_done_cnts)
{
int i;
d_tbs_type = type;
d_skruns = skruns;
d_mtbs_done_cnts = mtbs_done_cnts;
for (i = 0; i < MAX_QUEUED_KERNELS; i++) {
skruns[i].skid = 0;
mtbs_done_cnts[i] = 0;
}
}
void
init_skrun(void)
{
hipError_t err;
hipStreamCreate(&strm_submit);
hipMalloc(&g_skruns, sizeof(skrun_t) * MAX_QUEUED_KERNELS);
hipMalloc(&g_mtbs_done_cnts, sizeof(unsigned) * MAX_QUEUED_KERNELS);
info_n_mtbs = (unsigned *)calloc(MAX_QUEUED_KERNELS, sizeof(unsigned));
skrun_dones = (BOOL *)calloc(MAX_QUEUED_KERNELS, sizeof(BOOL));
pthread_create(&checker, NULL, skruns_checkfunc, NULL);
dim3 dimGrid(1,1), dimBlock(1,1);
hipLaunchKernelGGL(( kernel_init_skrun), dim3(dimGrid), dim3(dimBlock), 0, 0, sched->type, g_skruns, g_mtbs_done_cnts);
err = hipGetLastError();
if (err != hipSuccess)
error("failed to initialize skrun: %s\n", hipGetErrorString(err));
else
hipDeviceSynchronize();
}
void
fini_skrun(void)
{
void *retval;
checker_done = TRUE;
pthread_join(checker, &retval);
}
| cee911bc7c0bc47cbb548346814d258fe6d15dea.cu | #include "sdtbs_cu.h"
#include <pthread.h>
#include <unistd.h>
__device__ tbs_type_t d_tbs_type;
__device__ skrun_t *d_skruns;
__device__ unsigned *d_mtbs_done_cnts;
static skrun_t *g_skruns;
static unsigned *g_mtbs_done_cnts;
static unsigned *info_n_mtbs;
static BOOL *skrun_dones;
static unsigned skrid_done_min;
static unsigned cur_skrid_host;
static BOOL checker_done;
static pthread_t checker;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static cudaStream_t strm_submit;
#define SK_PROTO(name) __device__ int name(void *args[])
#define SK_FUNCS(base) SK_PROTO(base);
SK_FUNCS(loopcalc)
SK_FUNCS(mklc)
SK_FUNCS(gma)
SK_FUNCS(lma)
SK_FUNCS(kmeans)
static __device__ int
run_sub_kernel_func(skid_t skid, void *args[])
{
switch (skid) {
case LOOPCALC:
return loopcalc(args);
case MKLC:
return mklc(args);
case GMA:
return gma(args);
case LMA:
return lma(args);
case KMEANS:
return kmeans(args);
default:
return 0;
}
}
__device__ void
run_sub_kernel(skrid_t skrid)
{
skrun_t *skr;
int res;
skr = &d_skruns[skrid - 1];
res = run_sub_kernel_func(skr->skid, skr->args);
if (get_threadIdxX() == 0)
skr->res = res;
}
__global__ void
sub_kernel_func(skrid_t skrid)
{
run_sub_kernel(skrid);
}
static skrid_t
submit_skrun(skid_t skid, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrid_t skrid;
skrun_t skrun;
skrun.skid = skid;
skrun.dimGrid = dimGrid;
skrun.dimBlock = dimBlock;
memcpy(skrun.args, args, sizeof(void *) * MAX_ARGS);
skrun.res = 0;
skrun.n_tbs = dimGrid.x * dimGrid.y;
skrun.n_mtbs_per_tb = dimBlock.x * dimBlock.y / N_THREADS_PER_mTB;
pthread_mutex_lock(&mutex);
skrid = cur_skrid_host + 1;
info_n_mtbs[skrid - 1] = skrun.n_tbs * skrun.n_mtbs_per_tb;
cudaMemcpyAsync(g_skruns + cur_skrid_host, &skrun, sizeof(skrun_t), cudaMemcpyHostToDevice, strm_submit);
cudaStreamSynchronize(strm_submit);
cur_skrid_host++;
pthread_mutex_unlock(&mutex);
return skrid;
}
skrid_t
launch_kernel(skid_t skid, cudaStream_t strm, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrid_t skrid;
skrid = submit_skrun(skid, dimGrid, dimBlock, args);
if (sched->type == TBS_TYPE_HW)
sub_kernel_func<<<dimGrid, dimBlock, 0, strm>>>(skrid);
return skrid;
}
static void
wait_skrun(skrid_t skrid)
{
pthread_mutex_lock(&mutex);
while (!checker_done && !skrun_dones[skrid - 1])
pthread_cond_wait(&cond, &mutex);
pthread_mutex_unlock(&mutex);
}
void
wait_kernel(skrid_t skrid, cudaStream_t strm, int *pres)
{
skrun_t *skr;
int res;
if (sched->type == TBS_TYPE_HW)
cudaStreamSynchronize(strm);
else
wait_skrun(skrid);
skr = g_skruns + (skrid - 1);
cudaMemcpyAsync(&res, &skr->res, sizeof(int), cudaMemcpyDeviceToHost, strm);
cudaStreamSynchronize(strm);
*pres = res;
}
static void
notify_done_skruns(unsigned *mtbs_done_cnts, unsigned n_checks)
{
unsigned min_new = skrid_done_min;
BOOL notify = FALSE;
unsigned i;
pthread_mutex_lock(&mutex);
for (i = 0; i < n_checks; i++) {
if (skrun_dones[i + skrid_done_min])
continue;
if (mtbs_done_cnts[i] == info_n_mtbs[i + skrid_done_min]) {
notify = TRUE;
skrun_dones[i + skrid_done_min] = TRUE;
if (min_new == i + skrid_done_min) {
min_new++;
}
}
}
skrid_done_min = min_new;
if (notify)
pthread_cond_broadcast(&cond);
pthread_mutex_unlock(&mutex);
}
static void *
skruns_checkfunc(void *arg)
{
cudaStream_t strm;
cudaStreamCreate(&strm);
while (!checker_done) {
unsigned n_checks = cur_skrid_host - skrid_done_min;
if (n_checks > 0) {
unsigned *mtbs_done_cnts = (unsigned *)malloc(sizeof(unsigned) * n_checks);
cudaMemcpyAsync(mtbs_done_cnts, g_mtbs_done_cnts + skrid_done_min, sizeof(unsigned) * n_checks, cudaMemcpyDeviceToHost, strm);
cudaStreamSynchronize(strm);
notify_done_skruns(mtbs_done_cnts, n_checks);
free(mtbs_done_cnts);
}
usleep(100);
}
cudaStreamDestroy(strm);
return NULL;
}
__global__ void
kernel_init_skrun(tbs_type_t type, skrun_t *skruns, unsigned *mtbs_done_cnts)
{
int i;
d_tbs_type = type;
d_skruns = skruns;
d_mtbs_done_cnts = mtbs_done_cnts;
for (i = 0; i < MAX_QUEUED_KERNELS; i++) {
skruns[i].skid = 0;
mtbs_done_cnts[i] = 0;
}
}
void
init_skrun(void)
{
cudaError_t err;
cudaStreamCreate(&strm_submit);
cudaMalloc(&g_skruns, sizeof(skrun_t) * MAX_QUEUED_KERNELS);
cudaMalloc(&g_mtbs_done_cnts, sizeof(unsigned) * MAX_QUEUED_KERNELS);
info_n_mtbs = (unsigned *)calloc(MAX_QUEUED_KERNELS, sizeof(unsigned));
skrun_dones = (BOOL *)calloc(MAX_QUEUED_KERNELS, sizeof(BOOL));
pthread_create(&checker, NULL, skruns_checkfunc, NULL);
dim3 dimGrid(1,1), dimBlock(1,1);
kernel_init_skrun<<<dimGrid, dimBlock>>>(sched->type, g_skruns, g_mtbs_done_cnts);
err = cudaGetLastError();
if (err != cudaSuccess)
error("failed to initialize skrun: %s\n", cudaGetErrorString(err));
else
cudaDeviceSynchronize();
}
void
fini_skrun(void)
{
void *retval;
checker_done = TRUE;
pthread_join(checker, &retval);
}
|
3a55189043ef41635fbf7dc60cdcc6a0f91f8147.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuUtil.cu"
#include "scan.h"
#include "comm.h"
//#include "wtime.h"
#include "graph.h"
#include <stdio.h>
#include "iostream"
#define max_thd 256
#define max_block 256
using namespace std;
__global__ void block_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
// vertex_t A = head[tid];
// vertex_t B = adj[tid];
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
//printf("find A %d B %d C %d\n",A,B,X);
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += GPU_PER_PART * gridDim.x*blockDim.x/256;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]+=val;
count[blockIdx.x]=val;
// if(val!=0)
// printf("+ %d\n",count[blockIdx.x]);
}
}
__global__ void warp_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
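	//one warp per edge (A,B): cache 32 evenly spaced elements of the longer
	//adjacency list in shared memory, then for each element of the shorter list
	//run a coarse binary search over the cache (phase 1) followed by a binary
	//search inside the selected segment of the full list (phase 2); per-thread
	//counts are reduced per block at the end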
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
//if(i==0) printf("A %d B %d\n");
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
// tid += GPU_NUM* blockDim.x*gridDim.x/32;
tid += blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]=val;
count[blockIdx.x]+=val;
}
__syncthreads();
}
__global__ void init_count(index_t* count)
{
int tid = threadIdx.x;
count[tid] = 0;
}
__global__ void reduce_kernel(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
void initDevice(graph* g, int GPU_id,int Part_id){
//cuda memory copy of partAdj and partBegin
hipSetDevice(GPU_id);
int P=Part_id;
H_ERR(hipDeviceSynchronize() );
vertex_t vert_count= g->vert_count;
vertex_t* dev_adj;
index_t* dev_begin;
index_t* dev_count;
Edge* buffer0;
Edge* buffer1;
index_t EdgeCount = g->partEdgeCount[P];
vertex_t* Adj = g->partAdj[P];
index_t* Begin = g->partBegin[P];
H_ERR(hipMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) );
H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) );
H_ERR(hipMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
H_ERR(hipMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) );
H_ERR(hipMalloc(&buffer0, BufferSize*sizeof(Edge)) );
H_ERR(hipMalloc(&buffer1, BufferSize*sizeof(Edge)) );
g->gdata[GPU_id].adj = dev_adj;
g->gdata[GPU_id].begin = dev_begin;
g->gdata[GPU_id].count = dev_count;
g->gdata[GPU_id].EdgeBuffer[0]= buffer0;
g->gdata[GPU_id].EdgeBuffer[1]= buffer1;
g->gdata[GPU_id].partition_id = P;
g->gdata[GPU_id].currentBuffer= 0;
hipLaunchKernelGGL(( init_count) , dim3(1),dim3(max_thd), 0, 0, dev_count);
}
void DeviceCompute(graph* g, int GPU_id, index_t Chunk_id){
int P = g->gdata[GPU_id].partition_id;
// if(ds_status[P][Chunk_id]!=0) return;
// ds_status[P][Chunk_id]=1;
// if(ds_progress[P]<Chunk_id+1) ds_progress[P] = Chunk_id+1;
//control
vertex_t* dev_adj =g->gdata[GPU_id].adj;
index_t* dev_begin =g->gdata[GPU_id].begin;
index_t* dev_count =g->gdata[GPU_id].count;
Edge* buffer =g->gdata[GPU_id].EdgeBuffer[g->gdata[GPU_id].currentBuffer%2];
g->gdata[GPU_id].currentBuffer =1-g->gdata[GPU_id].currentBuffer;
index_t currentBufferSize = BufferSize;
if(Chunk_id==g->upperEdgeCount/BufferSize){
currentBufferSize = g->upperEdgeCount % BufferSize;
}
hipLaunchKernelGGL(( init_count) , dim3(1),dim3(max_thd), 0, 0, dev_count);
H_ERR(hipMemcpy(buffer, &g->OrientedEdge[Chunk_id*BufferSize], currentBufferSize*sizeof(Edge), hipMemcpyHostToDevice) );
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( warp_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0,
buffer,
dev_adj,
dev_begin,
0,
// GPU_id*256*256/32,
currentBufferSize,
dev_count
);
//write the result of this chunk back
H_ERR(hipDeviceSynchronize() );
index_t tempcount[max_block];
index_t mycount=0;
H_ERR(hipMemcpy(tempcount, dev_count, max_block*sizeof(index_t), hipMemcpyDeviceToHost));
for(int i=0; i<max_block; i++) mycount += tempcount[i];
g->ds_count[P][Chunk_id] = mycount;
}
void gpuReduce(graph* g, int GPU_id){
vertex_t* dev_adj =g->gdata[GPU_id].adj;
index_t* dev_begin =g->gdata[GPU_id].begin;
index_t* dev_count =g->gdata[GPU_id].count;
Edge** buffer =g->gdata[GPU_id].EdgeBuffer;
// H_ERR(hipDeviceSynchronize() );
// reduce_kernel <<<1,max_thd>>>(dev_count);
// H_ERR(hipMemcpy(&count[GPU_id], dev_count, sizeof(index_t), hipMemcpyDeviceToHost));
// thd_count += count[i];
// count[i] = thd_count;
H_ERR(hipFree(dev_adj) );
H_ERR(hipFree(dev_begin) );
H_ERR(hipFree(dev_count) );
H_ERR(hipFree(buffer[0]) );
H_ERR(hipFree(buffer[1]) );
// cout<<"GPU "<<GPU_id<<" finished"<<endl;
}
void gpuProc(graph *g,int GPU_id){
//double t0 = wtime();
index_t total_count=0;
for(int P=0; P<PART_NUM; P++){
// int P = GPU_id/4;
// if(PART_NUM > 1) int P = GPU_id%PART_NUM;
initDevice(g,GPU_id,P);
for(index_t i=GPU_id; i<g->ChunkNum; i+=GPU_NUM ){
// for(index_t i=GPU_id; i<g->ChunkNum; i+= 8 ){
// if(i%8<6)
DeviceCompute(g,GPU_id,i);
}
// index_t chunk_id = GPU_id;
// while(ds_progress[P]< ChunkNum){
// chunk_id = ds_progress[P];
// DeviceCompute(P,chunk_id);
// }
gpuReduce(g,GPU_id);
// total_count += g->count[GPU_id];
}
// g->count[GPU_id] = total_count;
//double t1 = wtime();
//cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl;
}
| 3a55189043ef41635fbf7dc60cdcc6a0f91f8147.cu | #include "cuUtil.cu"
#include "scan.h"
#include "comm.h"
//#include "wtime.h"
#include "graph.h"
#include <stdio.h>
#include "iostream"
#define max_thd 256
#define max_block 256
using namespace std;
__global__ void block_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
// vertex_t A = head[tid];
// vertex_t B = adj[tid];
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
//printf("find A %d B %d C %d\n",A,B,X);
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += GPU_PER_PART * gridDim.x*blockDim.x/256;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]+=val;
count[blockIdx.x]=val;
// if(val!=0)
// printf("+ %d\n",count[blockIdx.x]);
}
}
__global__ void warp_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
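	// Warp-per-edge strategy: each warp takes one oriented edge (A,B), caches 32
	// evenly spaced samples of the longer adjacency list, and its lanes binary-search
	// the shorter list in two phases (cached samples, then the narrowed global-memory
	// segment); per-thread hits are accumulated into count[blockIdx.x].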
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
//if(i==0) printf("A %d B %d\n");
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
// tid += GPU_NUM* blockDim.x*gridDim.x/32;
tid += blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]=val;
count[blockIdx.x]+=val;
}
__syncthreads();
}
__global__ void init_count(index_t* count)
{
int tid = threadIdx.x;
count[tid] = 0;
}
__global__ void reduce_kernel(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
void initDevice(graph* g, int GPU_id,int Part_id){
//cuda memory copy of partAdj and partBegin
cudaSetDevice(GPU_id);
int P=Part_id;
H_ERR(cudaDeviceSynchronize() );
vertex_t vert_count= g->vert_count;
vertex_t* dev_adj;
index_t* dev_begin;
index_t* dev_count;
Edge* buffer0;
Edge* buffer1;
index_t EdgeCount = g->partEdgeCount[P];
vertex_t* Adj = g->partAdj[P];
index_t* Begin = g->partBegin[P];
H_ERR(cudaMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) );
H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) );
H_ERR(cudaMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMalloc(&buffer0, BufferSize*sizeof(Edge)) );
H_ERR(cudaMalloc(&buffer1, BufferSize*sizeof(Edge)) );
g->gdata[GPU_id].adj = dev_adj;
g->gdata[GPU_id].begin = dev_begin;
g->gdata[GPU_id].count = dev_count;
g->gdata[GPU_id].EdgeBuffer[0]= buffer0;
g->gdata[GPU_id].EdgeBuffer[1]= buffer1;
g->gdata[GPU_id].partition_id = P;
g->gdata[GPU_id].currentBuffer= 0;
init_count <<<1,max_thd>>>(dev_count);
}
void DeviceCompute(graph* g, int GPU_id, index_t Chunk_id){
int P = g->gdata[GPU_id].partition_id;
// if(ds_status[P][Chunk_id]!=0) return;
// ds_status[P][Chunk_id]=1;
// if(ds_progress[P]<Chunk_id+1) ds_progress[P] = Chunk_id+1;
//control
vertex_t* dev_adj =g->gdata[GPU_id].adj;
index_t* dev_begin =g->gdata[GPU_id].begin;
index_t* dev_count =g->gdata[GPU_id].count;
Edge* buffer =g->gdata[GPU_id].EdgeBuffer[g->gdata[GPU_id].currentBuffer%2];
g->gdata[GPU_id].currentBuffer =1-g->gdata[GPU_id].currentBuffer;
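	// Successive chunks alternate (ping-pong) between the two preallocated device edge buffers.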
index_t currentBufferSize = BufferSize;
if(Chunk_id==g->upperEdgeCount/BufferSize){
currentBufferSize = g->upperEdgeCount % BufferSize;
}
init_count <<<1,max_thd>>>(dev_count);
H_ERR(cudaMemcpy(buffer, &g->OrientedEdge[Chunk_id*BufferSize], currentBufferSize*sizeof(Edge), cudaMemcpyHostToDevice) );
H_ERR(cudaDeviceSynchronize() );
warp_binary_kernel<<<max_block,max_thd>>>
( buffer,
dev_adj,
dev_begin,
0,
// GPU_id*256*256/32,
currentBufferSize,
dev_count
);
//write the result of this chunk back
H_ERR(cudaDeviceSynchronize() );
index_t tempcount[max_block];
index_t mycount=0;
H_ERR(cudaMemcpy(tempcount, dev_count, max_block*sizeof(index_t), cudaMemcpyDeviceToHost));
for(int i=0; i<max_block; i++) mycount += tempcount[i];
g->ds_count[P][Chunk_id] = mycount;
}
void gpuReduce(graph* g, int GPU_id){
vertex_t* dev_adj =g->gdata[GPU_id].adj;
index_t* dev_begin =g->gdata[GPU_id].begin;
index_t* dev_count =g->gdata[GPU_id].count;
Edge** buffer =g->gdata[GPU_id].EdgeBuffer;
// H_ERR(cudaDeviceSynchronize() );
// reduce_kernel <<<1,max_thd>>>(dev_count);
// H_ERR(cudaMemcpy(&count[GPU_id], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost));
// thd_count += count[i];
// count[i] = thd_count;
H_ERR(cudaFree(dev_adj) );
H_ERR(cudaFree(dev_begin) );
H_ERR(cudaFree(dev_count) );
H_ERR(cudaFree(buffer[0]) );
H_ERR(cudaFree(buffer[1]) );
// cout<<"GPU "<<GPU_id<<" finished"<<endl;
}
void gpuProc(graph *g,int GPU_id){
//double t0 = wtime();
index_t total_count=0;
for(int P=0; P<PART_NUM; P++){
// int P = GPU_id/4;
// if(PART_NUM > 1) int P = GPU_id%PART_NUM;
initDevice(g,GPU_id,P);
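		// Chunks of the oriented edge list are dealt out round-robin across GPUs:
		// this GPU processes chunks GPU_id, GPU_id+GPU_NUM, GPU_id+2*GPU_NUM, ...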
for(index_t i=GPU_id; i<g->ChunkNum; i+=GPU_NUM ){
// for(index_t i=GPU_id; i<g->ChunkNum; i+= 8 ){
// if(i%8<6)
DeviceCompute(g,GPU_id,i);
}
// index_t chunk_id = GPU_id;
// while(ds_progress[P]< ChunkNum){
// chunk_id = ds_progress[P];
// DeviceCompute(P,chunk_id);
// }
gpuReduce(g,GPU_id);
// total_count += g->count[GPU_id];
}
// g->count[GPU_id] = total_count;
//double t1 = wtime();
//cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl;
}
|
0e68f82c77b8ef04232130b069a504811214b96a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 08.11.2018
// @author [email protected]
//
#include "../scalar_bool.h"
#include <op_boilerplate.h>
#include <types/types.h>
#include "../legacy_ops.h"
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void scalarAlongDimension(void *x, Nd4jLong *xShapeInfo,
void *extraParams,
void *z, Nd4jLong *zShapeInfo,
void *scalars,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
functions::scalar::ScalarBoolTransform<X,Z>::template transformCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void scalarSimpleShaped(void* x, void *y, Nd4jLong *xShapeInfo, void *params, void *z, Nd4jLong *zShapeInfo, int *allocationBuffer) {
functions::scalar::ScalarBoolTransform<X,Z>::template transformCuda<OpType>(y, x, xShapeInfo, params, z, zShapeInfo, allocationBuffer);
}
// *********************************************************************//
// *********************************************************************//
namespace functions {
namespace scalar {
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(void* vscalar,
void *vy, Nd4jLong *yShapeInfo,
void *vparams,
void *vz, Nd4jLong *zShapeInfo,
int *allocationBuffer) {
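            // Applies OpType::op(y[i], scalar, extraParams) over every element of y;
            // takes the element-wise-stride fast path when both buffers expose an ews
            // and share ordering, otherwise offsets are resolved from the shape info.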
auto scalar = reinterpret_cast<X*>(vscalar)[0];
auto y = reinterpret_cast<X*>(vy);
auto params = reinterpret_cast<X*>(vparams);
auto z = reinterpret_cast<Z*>(vz);
auto yRank = shape::rank(yShapeInfo);
auto yEWS = shape::elementWiseStride(yShapeInfo);
auto yShape = shape::shapeOf(yShapeInfo);
auto yStride = shape::stride(yShapeInfo);
auto zRank = shape::rank(zShapeInfo);
auto zEWS = shape::elementWiseStride(zShapeInfo);
auto zShape = shape::shapeOf(zShapeInfo);
auto zStride = shape::stride(zShapeInfo);
int totalThreads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int len;
if(threadIdx.x == 0)
len = shape::length(yShapeInfo);
__syncthreads();
if(yEWS >= 1 && zEWS >= 1 && shape::order(yShapeInfo) == shape::order(zShapeInfo)) {
transformCuda<OpType>(len, vscalar, vy, yEWS, vparams, vz, zEWS, allocationBuffer);
}
else {
for (Nd4jLong i = tid; i < len; i+= totalThreads)
z[shape::getIndexOffset(i, zShapeInfo, len)] = OpType::op(y[shape::getIndexOffset(i, yShapeInfo, len)], scalar, params);
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(Nd4jLong len,
void* vx,
void *vy, Nd4jLong yEWS,
void *vparams,
void *vz, Nd4jLong zEWS,
int *allocationBuffer) {
auto x = reinterpret_cast<X*>(vx)[0];
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<X*>(vparams);
int totalThreads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
Nd4jLong i = tid;
if(yEWS == 1 && zEWS == 1) {
for (; i < len; i += totalThreads)
z[i] = OpType::op(y[i], x, params);
}
else {
for (; i < len; i += totalThreads)
z[i * zEWS] = OpType::op(y[i * yEWS], x, params);
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong *zShapeInfo,
void *vscalars,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
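            // One scalar per tensor-along-dimension (TAD): block r walks TAD r in a
            // grid-strided loop, its threads stride over that TAD's elements and write
            // OpType::op(element, scalars[r], extraParams) into the matching output TAD.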
auto x = reinterpret_cast<X*>(vx);
auto scalars = reinterpret_cast<X*>(vscalars);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
if (tadShapeInfoZ == nullptr) {
tadShapeInfoZ = tadShapeInfo;
tadOffsetsZ = tadOffsets;
}
// tad preparation
auto tadEws = shape::elementWiseStride(tadShapeInfo);
auto zEws = shape::elementWiseStride(tadShapeInfoZ);
auto tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
auto numTads =shape::length(xShapeInfo) / tadLength;
if (tadEws > 0 && zEws > 0 && shape::order(tadShapeInfo) == shape::order(zShapeInfo)) {
// main loop, rolling over tads
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Z *oZ = z + tadOffsetsZ[r];
X *oX = x + tadOffsets[r];
auto s = scalars[r];
for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
oZ[f * zEws] = OpType::op(oX[f * tadEws], s, extraParams);
}
} else {
// main loop, rolling over tads
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Z *oZ = z + tadOffsetsZ[r];
X *oX = x + tadOffsets[r];
auto s = scalars[r];
for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
oZ[shape::getIndexOffset(f, tadShapeInfoZ, tadLength)] = OpType::op(oX[shape::getIndexOffset(f, tadShapeInfo, tadLength)], s, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void ScalarBoolTransform<X, Z>::intermediateAlongDimension(dim3& launchDims, hipStream_t *stream,
void *x, Nd4jLong *xShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *scalars,
void *extraParams,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
hipLaunchKernelGGL(( scalarAlongDimension<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
void _CUDA_H ScalarBoolTransform<X,Z>::intermediateShaped(dim3& launchDims, hipStream_t *stream,
void *vx, Nd4jLong *xShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void* vscalar,
void *vextraParams, int *allocPointer){
hipLaunchKernelGGL(( scalarSimpleShaped<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, vscalar, xShapeInfo, vextraParams, vz, zShapeInfo, allocPointer);
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::executeCudaShaped(dim3& launchDims, hipStream_t *stream,
int opNum,
void *vx, Nd4jLong *xShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void* vscalar,
void *vextraParams) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H14 opNum:[%i]\n", opNum);
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalar, vextraParams, nullptr), SCALAR_BOOL_OPS);
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::executeCudaAlongDimension(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void *vscalars, void *vextraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateAlongDimension, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalars, vextraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SCALAR_BOOL_OPS);
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ScalarBoolTransform, , LIBND4J_TYPES, BOOL_TYPES);
}
}
| 0e68f82c77b8ef04232130b069a504811214b96a.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 08.11.2018
// @author [email protected]
//
#include "../scalar_bool.h"
#include <op_boilerplate.h>
#include <types/types.h>
#include "../legacy_ops.h"
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void scalarAlongDimension(void *x, Nd4jLong *xShapeInfo,
void *extraParams,
void *z, Nd4jLong *zShapeInfo,
void *scalars,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
functions::scalar::ScalarBoolTransform<X,Z>::template transformCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void scalarSimpleShaped(void* x, void *y, Nd4jLong *xShapeInfo, void *params, void *z, Nd4jLong *zShapeInfo, int *allocationBuffer) {
functions::scalar::ScalarBoolTransform<X,Z>::template transformCuda<OpType>(y, x, xShapeInfo, params, z, zShapeInfo, allocationBuffer);
}
// *********************************************************************//
// *********************************************************************//
namespace functions {
namespace scalar {
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(void* vscalar,
void *vy, Nd4jLong *yShapeInfo,
void *vparams,
void *vz, Nd4jLong *zShapeInfo,
int *allocationBuffer) {
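            // Applies OpType::op(y[i], scalar, extraParams) over every element of y;
            // takes the element-wise-stride fast path when both buffers expose an ews
            // and share ordering, otherwise offsets are resolved from the shape info.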
auto scalar = reinterpret_cast<X*>(vscalar)[0];
auto y = reinterpret_cast<X*>(vy);
auto params = reinterpret_cast<X*>(vparams);
auto z = reinterpret_cast<Z*>(vz);
auto yRank = shape::rank(yShapeInfo);
auto yEWS = shape::elementWiseStride(yShapeInfo);
auto yShape = shape::shapeOf(yShapeInfo);
auto yStride = shape::stride(yShapeInfo);
auto zRank = shape::rank(zShapeInfo);
auto zEWS = shape::elementWiseStride(zShapeInfo);
auto zShape = shape::shapeOf(zShapeInfo);
auto zStride = shape::stride(zShapeInfo);
int totalThreads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int len;
if(threadIdx.x == 0)
len = shape::length(yShapeInfo);
__syncthreads();
if(yEWS >= 1 && zEWS >= 1 && shape::order(yShapeInfo) == shape::order(zShapeInfo)) {
transformCuda<OpType>(len, vscalar, vy, yEWS, vparams, vz, zEWS, allocationBuffer);
}
else {
for (Nd4jLong i = tid; i < len; i+= totalThreads)
z[shape::getIndexOffset(i, zShapeInfo, len)] = OpType::op(y[shape::getIndexOffset(i, yShapeInfo, len)], scalar, params);
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(Nd4jLong len,
void* vx,
void *vy, Nd4jLong yEWS,
void *vparams,
void *vz, Nd4jLong zEWS,
int *allocationBuffer) {
auto x = reinterpret_cast<X*>(vx)[0];
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<X*>(vparams);
int totalThreads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
Nd4jLong i = tid;
if(yEWS == 1 && zEWS == 1) {
for (; i < len; i += totalThreads)
z[i] = OpType::op(y[i], x, params);
}
else {
for (; i < len; i += totalThreads)
z[i * zEWS] = OpType::op(y[i * yEWS], x, params);
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
__device__ void ScalarBoolTransform<X, Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong *zShapeInfo,
void *vscalars,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
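            // One scalar per tensor-along-dimension (TAD): block r walks TAD r in a
            // grid-strided loop, its threads stride over that TAD's elements and write
            // OpType::op(element, scalars[r], extraParams) into the matching output TAD.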
auto x = reinterpret_cast<X*>(vx);
auto scalars = reinterpret_cast<X*>(vscalars);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
if (tadShapeInfoZ == nullptr) {
tadShapeInfoZ = tadShapeInfo;
tadOffsetsZ = tadOffsets;
}
// tad preparation
auto tadEws = shape::elementWiseStride(tadShapeInfo);
auto zEws = shape::elementWiseStride(tadShapeInfoZ);
auto tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
auto numTads =shape::length(xShapeInfo) / tadLength;
if (tadEws > 0 && zEws > 0 && shape::order(tadShapeInfo) == shape::order(zShapeInfo)) {
// main loop, rolling over tads
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Z *oZ = z + tadOffsetsZ[r];
X *oX = x + tadOffsets[r];
auto s = scalars[r];
for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
oZ[f * zEws] = OpType::op(oX[f * tadEws], s, extraParams);
}
} else {
// main loop, rolling over tads
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Z *oZ = z + tadOffsetsZ[r];
X *oX = x + tadOffsets[r];
auto s = scalars[r];
for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
oZ[shape::getIndexOffset(f, tadShapeInfoZ, tadLength)] = OpType::op(oX[shape::getIndexOffset(f, tadShapeInfo, tadLength)], s, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void ScalarBoolTransform<X, Z>::intermediateAlongDimension(dim3& launchDims, cudaStream_t *stream,
void *x, Nd4jLong *xShapeInfo,
void *z, Nd4jLong *zShapeInfo,
void *scalars,
void *extraParams,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
scalarAlongDimension<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template<typename OpType>
void _CUDA_H ScalarBoolTransform<X,Z>::intermediateShaped(dim3& launchDims, cudaStream_t *stream,
void *vx, Nd4jLong *xShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void* vscalar,
void *vextraParams, int *allocPointer){
scalarSimpleShaped<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, vscalar, xShapeInfo, vextraParams, vz, zShapeInfo, allocPointer);
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::executeCudaShaped(dim3& launchDims, cudaStream_t *stream,
int opNum,
void *vx, Nd4jLong *xShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void* vscalar,
void *vextraParams) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H14 opNum:[%i]\n", opNum);
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalar, vextraParams, nullptr), SCALAR_BOOL_OPS);
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
void ScalarBoolTransform<X,Y>::executeCudaAlongDimension(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void *vscalars, void *vextraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateAlongDimension, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalars, vextraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SCALAR_BOOL_OPS);
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ScalarBoolTransform, , LIBND4J_TYPES, BOOL_TYPES);
}
}
|
f7d7295a6e91822ec249209549c09289f85474bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// RGB Median filter kernel using binary search method
// Uses 32 bit GMEM reads into a block of LMEM padded for apron of radius = 1 (3x3 neighbor op)
// R, G and B medians are treated separately
//*****************************************************************************
__global__ void ckMedian(
const uchar4* uc4Source,
unsigned int* uiDest,
const int iLocalPixPitch,
const int iImageWidth,
const int iDevImageHeight)
{
// Get parent image x and y pixel coordinates from global ID, and compute offset into parent GMEM data
int iLocalIdX = threadIdx.x;
int iLocalIdY = threadIdx.y;
int iGroupIdX = blockIdx.x;
int iBlockX = blockDim.x;
int iBlockY = blockDim.y;
int iImagePosX = blockIdx.x * iBlockX + iLocalIdX;
int iDevYPrime = blockIdx.y * iBlockY + iLocalIdY - 1; // Shift offset up 1 radius (1 row) for reads
int iImageX = gridDim.x * blockDim.x;
HIP_DYNAMIC_SHARED( uchar4, uc4LocalData)
int iDevGMEMOffset = __mul24(iDevYPrime, iImageX) + iImagePosX;
// Compute initial offset of current pixel within work group LMEM block
int iLocalPixOffset = __mul24(iLocalIdY, iLocalPixPitch) + iLocalIdX + 1;
// Main read of GMEM data into LMEM
if((iDevYPrime > -1) && (iDevYPrime < iDevImageHeight) && (iImagePosX < iImageWidth))
{
uc4LocalData[iLocalPixOffset] = uc4Source[iDevGMEMOffset];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
// Work items with y ID < 2 read bottom 2 rows of LMEM
if (iLocalIdY < 2)
{
// Increase local offset by 1 workgroup LMEM block height
// to read in top rows from the next block region down
iLocalPixOffset += __mul24(iBlockY, iLocalPixPitch);
// If source offset is within the image boundaries
if (((iDevYPrime + iBlockY) < iDevImageHeight) && (iImagePosX < iImageWidth))
{
// Read in top rows from the next block region down
uc4LocalData[iLocalPixOffset] = uc4Source[iDevGMEMOffset +
__mul24(iBlockY, iImageX)];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
}
// Work items with x ID at right workgroup edge will read Left apron pixel
if (iLocalIdX == (iBlockX - 1))
{
// set local offset to read data from the next region over
iLocalPixOffset = __mul24(iLocalIdY, iLocalPixPitch);
// If source offset is within the image boundaries and not at the leftmost workgroup
if ((iDevYPrime > -1) && (iDevYPrime < iDevImageHeight) && (iGroupIdX > 0))
{
            // Read the left apron pixel for this row from GMEM (one pixel to the left of this block region)
uc4LocalData[iLocalPixOffset] = uc4Source[__mul24(iDevYPrime,
iImageX) + __mul24(iGroupIdX, iBlockX) - 1];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
// If in the bottom 2 rows of workgroup block
if (iLocalIdY < 2)
{
// Increase local offset by 1 workgroup LMEM block height
// to read in top rows from the next block region down
iLocalPixOffset += __mul24(iBlockY, iLocalPixPitch);
// If source offset in the next block down isn't off the image and not at the leftmost workgroup
if (((iDevYPrime + iBlockY) < iDevImageHeight) && (iGroupIdX > 0))
{
// read in from GMEM (reaching down 1 workgroup LMEM block height and left 1 pixel)
uc4LocalData[iLocalPixOffset] = uc4Source[__mul24((iDevYPrime +
iBlockY), iImageX) + __mul24(iGroupIdX, iBlockX) - 1];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
}
}
else if (iLocalIdX == 0) // Work items with x ID at left workgroup edge will read right apron pixel
{
// set local offset
iLocalPixOffset = __mul24((iLocalIdY + 1), iLocalPixPitch) - 1;
if ((iDevYPrime > -1) && (iDevYPrime < iDevImageHeight) &&
(__mul24((iGroupIdX + 1), iBlockX) < iImageWidth))
{
            // read in from GMEM (reaching one pixel past this block's right edge) if source offset is within image boundaries
uc4LocalData[iLocalPixOffset] = uc4Source[__mul24(iDevYPrime,
iImageX) + __mul24((iGroupIdX + 1), iBlockX)];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
// Read bottom 2 rows of workgroup LMEM block
if (iLocalIdY < 2)
{
// increase local offset by 1 workgroup LMEM block height
iLocalPixOffset += (__mul24(iBlockY, iLocalPixPitch));
if (((iDevYPrime + iBlockY) < iDevImageHeight) &&
(__mul24((iGroupIdX + 1), iBlockX) < iImageWidth) )
{
                // read in from GMEM (reaching down 1 workgroup LMEM block height and one pixel past this block's right edge) if source offset is within image boundaries
uc4LocalData[iLocalPixOffset] = uc4Source[__mul24((iDevYPrime +
iBlockY), iImageX) + __mul24((iGroupIdX + 1), iBlockX)];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
}
}
// Synchronize the read into LMEM
__syncthreads();
// Compute
// reset accumulators
float fMedianEstimate[3] = {128.0f, 128.0f, 128.0f};
float fMinBound[3] = {0.0f, 0.0f, 0.0f};
float fMaxBound[3] = {255.0f, 255.0f, 255.0f};
    // now find the median using a binary search - Divide and Conquer over the 256 gray-value levels of the 8-bit plane
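    // Each pass halves the candidate interval per channel: if more than 4 of the 9
    // neighborhood values exceed the current estimate, the median lies above it,
    // otherwise at or below it, so 8 bisections resolve an 8-bit median.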
for(int iSearch = 0; iSearch < 8; iSearch++) // for 8 bit data, use 0..8. For 16 bit data, 0..16. More iterations for more bits.
{
unsigned int uiHighCount [3] = {0, 0, 0};
// set local offset and kernel offset
iLocalPixOffset = __mul24(iLocalIdY, iLocalPixPitch) + iLocalIdX;
// Row1 Left Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row1 Middle Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row1 Right Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset].z);
// set the offset into SMEM for next row
iLocalPixOffset += (iLocalPixPitch - 2);
// Row2 Left Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row2 Middle Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row2 Right Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset].z);
// set the offset into SMEM for next row
iLocalPixOffset += (iLocalPixPitch - 2);
// Row3 Left Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row3 Middle Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row3 Right Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset].z);
//********************************
// reset the appropriate bound, depending upon counter
if(uiHighCount[0] > 4)
{
fMinBound[0] = fMedianEstimate[0];
}
else
{
fMaxBound[0] = fMedianEstimate[0];
}
if(uiHighCount[1] > 4)
{
fMinBound[1] = fMedianEstimate[1];
}
else
{
fMaxBound[1] = fMedianEstimate[1];
}
if(uiHighCount[2] > 4)
{
fMinBound[2] = fMedianEstimate[2];
}
else
{
fMaxBound[2] = fMedianEstimate[2];
}
// refine the estimate
fMedianEstimate[0] = 0.5f * (fMaxBound[0] + fMinBound[0]);
fMedianEstimate[1] = 0.5f * (fMaxBound[1] + fMinBound[1]);
fMedianEstimate[2] = 0.5f * (fMaxBound[2] + fMinBound[2]);
}
    // pack the R, G and B channel medians into a single 32-bit pixel
unsigned int uiPackedPix = 0x000000FF & (unsigned int)(fMedianEstimate[0] + 0.5f);
uiPackedPix |= 0x0000FF00 & (((unsigned int)(fMedianEstimate[1] + 0.5f)) << 8);
uiPackedPix |= 0x00FF0000 & (((unsigned int)(fMedianEstimate[2] + 0.5f)) << 16);
// Write out to GMEM with restored offset
if((iDevYPrime < iDevImageHeight) && (iImagePosX < iImageWidth))
{
uiDest[iDevGMEMOffset + iImageX] = uiPackedPix;
}
}
| f7d7295a6e91822ec249209549c09289f85474bd.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// RGB Median filter kernel using binary search method
// Uses 32 bit GMEM reads into a block of LMEM padded for apron of radius = 1 (3x3 neighbor op)
// R, G and B medians are treated separately
//*****************************************************************************
__global__ void ckMedian(
const uchar4* uc4Source,
unsigned int* uiDest,
const int iLocalPixPitch,
const int iImageWidth,
const int iDevImageHeight)
{
// Get parent image x and y pixel coordinates from global ID, and compute offset into parent GMEM data
int iLocalIdX = threadIdx.x;
int iLocalIdY = threadIdx.y;
int iGroupIdX = blockIdx.x;
int iBlockX = blockDim.x;
int iBlockY = blockDim.y;
int iImagePosX = blockIdx.x * iBlockX + iLocalIdX;
int iDevYPrime = blockIdx.y * iBlockY + iLocalIdY - 1; // Shift offset up 1 radius (1 row) for reads
int iImageX = gridDim.x * blockDim.x;
HIP_DYNAMIC_SHARED( uchar4, uc4LocalData)
int iDevGMEMOffset = __mul24(iDevYPrime, iImageX) + iImagePosX;
// Compute initial offset of current pixel within work group LMEM block
int iLocalPixOffset = __mul24(iLocalIdY, iLocalPixPitch) + iLocalIdX + 1;
// Main read of GMEM data into LMEM
if((iDevYPrime > -1) && (iDevYPrime < iDevImageHeight) && (iImagePosX < iImageWidth))
{
uc4LocalData[iLocalPixOffset] = uc4Source[iDevGMEMOffset];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
// Work items with y ID < 2 read bottom 2 rows of LMEM
if (iLocalIdY < 2)
{
// Increase local offset by 1 workgroup LMEM block height
// to read in top rows from the next block region down
iLocalPixOffset += __mul24(iBlockY, iLocalPixPitch);
// If source offset is within the image boundaries
if (((iDevYPrime + iBlockY) < iDevImageHeight) && (iImagePosX < iImageWidth))
{
// Read in top rows from the next block region down
uc4LocalData[iLocalPixOffset] = uc4Source[iDevGMEMOffset +
__mul24(iBlockY, iImageX)];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
}
// Work items with x ID at right workgroup edge will read Left apron pixel
if (iLocalIdX == (iBlockX - 1))
{
// set local offset to read data from the next region over
iLocalPixOffset = __mul24(iLocalIdY, iLocalPixPitch);
// If source offset is within the image boundaries and not at the leftmost workgroup
if ((iDevYPrime > -1) && (iDevYPrime < iDevImageHeight) && (iGroupIdX > 0))
{
            // Read the left apron pixel for this row from GMEM (one pixel to the left of this block region)
uc4LocalData[iLocalPixOffset] = uc4Source[__mul24(iDevYPrime,
iImageX) + __mul24(iGroupIdX, iBlockX) - 1];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
// If in the bottom 2 rows of workgroup block
if (iLocalIdY < 2)
{
// Increase local offset by 1 workgroup LMEM block height
// to read in top rows from the next block region down
iLocalPixOffset += __mul24(iBlockY, iLocalPixPitch);
// If source offset in the next block down isn't off the image and not at the leftmost workgroup
if (((iDevYPrime + iBlockY) < iDevImageHeight) && (iGroupIdX > 0))
{
// read in from GMEM (reaching down 1 workgroup LMEM block height and left 1 pixel)
uc4LocalData[iLocalPixOffset] = uc4Source[__mul24((iDevYPrime +
iBlockY), iImageX) + __mul24(iGroupIdX, iBlockX) - 1];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
}
}
else if (iLocalIdX == 0) // Work items with x ID at left workgroup edge will read right apron pixel
{
// set local offset
iLocalPixOffset = __mul24((iLocalIdY + 1), iLocalPixPitch) - 1;
if ((iDevYPrime > -1) && (iDevYPrime < iDevImageHeight) &&
(__mul24((iGroupIdX + 1), iBlockX) < iImageWidth))
{
            // read in from GMEM (reaching one pixel past this block's right edge) if source offset is within image boundaries
uc4LocalData[iLocalPixOffset] = uc4Source[__mul24(iDevYPrime,
iImageX) + __mul24((iGroupIdX + 1), iBlockX)];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
// Read bottom 2 rows of workgroup LMEM block
if (iLocalIdY < 2)
{
// increase local offset by 1 workgroup LMEM block height
iLocalPixOffset += (__mul24(iBlockY, iLocalPixPitch));
if (((iDevYPrime + iBlockY) < iDevImageHeight) &&
(__mul24((iGroupIdX + 1), iBlockX) < iImageWidth) )
{
                // read in from GMEM (reaching down 1 workgroup LMEM block height and one pixel past this block's right edge) if source offset is within image boundaries
uc4LocalData[iLocalPixOffset] = uc4Source[__mul24((iDevYPrime +
iBlockY), iImageX) + __mul24((iGroupIdX + 1), iBlockX)];
}
else
{
uc4LocalData[iLocalPixOffset] = make_uchar4(0, 0, 0, 0);
}
}
}
// Synchronize the read into LMEM
__syncthreads();
// Compute
// reset accumulators
float fMedianEstimate[3] = {128.0f, 128.0f, 128.0f};
float fMinBound[3] = {0.0f, 0.0f, 0.0f};
float fMaxBound[3] = {255.0f, 255.0f, 255.0f};
    // now find the median using a binary search - Divide and Conquer over the 256 gray-value levels of the 8-bit plane
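    // Each pass halves the candidate interval per channel: if more than 4 of the 9
    // neighborhood values exceed the current estimate, the median lies above it,
    // otherwise at or below it, so 8 bisections resolve an 8-bit median.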
for(int iSearch = 0; iSearch < 8; iSearch++) // for 8 bit data, use 0..8. For 16 bit data, 0..16. More iterations for more bits.
{
unsigned int uiHighCount [3] = {0, 0, 0};
// set local offset and kernel offset
iLocalPixOffset = __mul24(iLocalIdY, iLocalPixPitch) + iLocalIdX;
// Row1 Left Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row1 Middle Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row1 Right Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset].z);
// set the offset into SMEM for next row
iLocalPixOffset += (iLocalPixPitch - 2);
// Row2 Left Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row2 Middle Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row2 Right Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset].z);
// set the offset into SMEM for next row
iLocalPixOffset += (iLocalPixPitch - 2);
// Row3 Left Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row3 Middle Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset++].z);
// Row3 Right Pix (RGB)
uiHighCount[0] += (fMedianEstimate[0] < uc4LocalData[iLocalPixOffset].x);
uiHighCount[1] += (fMedianEstimate[1] < uc4LocalData[iLocalPixOffset].y);
uiHighCount[2] += (fMedianEstimate[2] < uc4LocalData[iLocalPixOffset].z);
//********************************
// reset the appropriate bound, depending upon counter
if(uiHighCount[0] > 4)
{
fMinBound[0] = fMedianEstimate[0];
}
else
{
fMaxBound[0] = fMedianEstimate[0];
}
if(uiHighCount[1] > 4)
{
fMinBound[1] = fMedianEstimate[1];
}
else
{
fMaxBound[1] = fMedianEstimate[1];
}
if(uiHighCount[2] > 4)
{
fMinBound[2] = fMedianEstimate[2];
}
else
{
fMaxBound[2] = fMedianEstimate[2];
}
// refine the estimate
fMedianEstimate[0] = 0.5f * (fMaxBound[0] + fMinBound[0]);
fMedianEstimate[1] = 0.5f * (fMaxBound[1] + fMinBound[1]);
fMedianEstimate[2] = 0.5f * (fMaxBound[2] + fMinBound[2]);
}
    // pack the R, G and B channel medians into a single 32-bit pixel
unsigned int uiPackedPix = 0x000000FF & (unsigned int)(fMedianEstimate[0] + 0.5f);
uiPackedPix |= 0x0000FF00 & (((unsigned int)(fMedianEstimate[1] + 0.5f)) << 8);
uiPackedPix |= 0x00FF0000 & (((unsigned int)(fMedianEstimate[2] + 0.5f)) << 16);
// Write out to GMEM with restored offset
if((iDevYPrime < iDevImageHeight) && (iImagePosX < iImageWidth))
{
uiDest[iDevGMEMOffset + iImageX] = uiPackedPix;
}
}
|
7ae850143e058115f436f8c602f615a32ba2d1ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021, NAVER Corp. Authored by CLOVA.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <float.h>
#include "src/fastertransformer/kernels/reduce_kernel_utils.cuh"
#include "src/fastertransformer/kernels/sampling_topk_kernels.h"
#include "src/fastertransformer/kernels/sampling_topp_kernels.h"
#include "src/fastertransformer/layers/sampling_layers/TopPSamplingLayer.h"
#include "src/fastertransformer/utils/logger.h"
#include "src/fastertransformer/utils/memory_utils.h"
namespace fastertransformer {
static __global__ void set_topp_runtime_args(int batch_size,
uint top_k,
uint* top_ks,
int top_ks_size,
float top_p,
float* top_ps,
int top_ps_size,
bool* skip_decode,
float* initial_top_p_buf,
float* top_p_decay_buf,
const float* top_p_decay,
float* top_p_min_buf,
const float* top_p_min,
int32_t* top_p_reset_ids_buf,
const uint32_t* top_p_reset_ids)
{
/**
     * @brief Set up the runtime arguments for topp, broadcasting top_p to top_ps
and top_k to top_ks, copying top_p_decay/top_p_min/top_p_reset_ids
to internal buffers.
*
* \param batch_size [batch_size]
     * \param top_k [batch_size]
* \param top_ks [batch_size]
* \param top_ks_size [batch_size]
* \param top_p [batch_size]
* \param top_ps [batch_size]
* \param top_ps_size [batch_size]
* \param skip_decode [batch_size]
* \param initial_top_p_buf [batch_size]
* \param top_p_decay_buf [batch_size]
* \param top_p_decay [batch_size], optional, must between [0, 1]
* \param top_p_min_buf [batch_size]
* \param top_p_min [batch_size], optional, must between [0, 1]
* \param top_p_reset_ids_buf [batch_size]
* \param top_p_reset_ids [batch_size], optional
*
*/
int index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = index; i < batch_size; i += gridDim.x * blockDim.x) {
uint k = top_ks_size > 1 ? top_ks[i] : top_k;
float p = top_ps_size > 1 ? top_ps[i] : top_p;
if (k == 0 && p == 0.0f) {
            // FT's topp implementation does not support topp = 0.0f, but it is equivalent to greedy search.
// So, we set the topk = 1 as an alternative solution.
k = 1;
}
top_ks[i] = k;
// Clip p value if it is out of range. range = [0.0, 1.0].
top_ps[i] = p < 0.0f ? 0.0f : (p > 1.0f ? 1.0f : p);
if (p < 0.0f || p > 1.0f) {
printf("[WARNING] topp (%f) is out of range ([0.0, 1.0f]) for token %d"
" clip to closest number %f.\n",
p,
i,
top_ps[i]);
}
skip_decode[i] = k > 0;
initial_top_p_buf[i] = top_ps[i];
top_p_decay_buf[i] = top_p_decay == nullptr ? 1.0f : top_p_decay[i];
if (top_p_decay_buf[i] > 1.0f || top_p_decay_buf[i] <= 0.0f) {
printf("[WARNING] top_p_decay_buf (%f) is out of range ([0.0, 1.0f]) for token %d,"
" change to 1.0f.\n",
top_p_decay_buf[i],
i);
top_p_decay_buf[i] = 1.0f;
}
top_p_min_buf[i] = top_p_min == nullptr ? 1e-6f : top_p_min[i]; // prevent topp becoming 0.0
if (top_p_min_buf[i] > 1.0f || top_p_min_buf[i] <= 0.0f) {
printf("[WARNING] top_p_min_buf (%f) is out of range ([0.0, 1.0f]) for token %d,"
" change to 0.5f.\n",
top_p_min_buf[i],
i);
top_p_min_buf[i] = 0.5f;
}
top_p_reset_ids_buf[i] = (int32_t)(top_p_reset_ids == nullptr ? -1 : top_p_reset_ids[i]);
}
}
template<typename T>
void TopPSamplingLayer<T>::allocateBuffer()
{
FT_CHECK(false);
}
template<typename T>
void TopPSamplingLayer<T>::allocateBuffer(size_t batch_size, Tensor top_k, Tensor top_p)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
BaseSamplingLayer<T>::allocateBuffer(batch_size, top_k, top_p);
invokeTopPSampling<T>(nullptr, // workspace
sampling_workspace_size_,
cub_temp_storage_size_,
nullptr, // output_ids
nullptr, // sequence_length
nullptr, // finished_buffer
nullptr, // cum_log_probs
nullptr, // output_log_probs
nullptr, // log_probs
topp_id_vals_buf_,
topp_offset_buf_,
begin_topp_offset_buf_,
curandstate_buf_,
batch_size,
vocab_size_padded_,
nullptr,
top_p.size() > 0 ? top_p.max<float>() : 0.0f,
stream_,
cuda_device_prop_,
skip_decode_buf_);
sampling_workspace_ = allocator_->reMalloc(sampling_workspace_, sampling_workspace_size_, true);
runtime_top_k_buf_ =
reinterpret_cast<uint*>(allocator_->reMalloc(runtime_top_k_buf_, sizeof(uint) * batch_size, false));
runtime_top_p_buf_ =
reinterpret_cast<float*>(allocator_->reMalloc(runtime_top_p_buf_, sizeof(float) * batch_size, false));
initial_top_p_buf_ =
reinterpret_cast<float*>(allocator_->reMalloc(initial_top_p_buf_, sizeof(float) * batch_size, false));
top_p_decay_buf_ =
reinterpret_cast<float*>(allocator_->reMalloc(top_p_decay_buf_, sizeof(float) * batch_size, false));
top_p_min_buf_ = reinterpret_cast<float*>(allocator_->reMalloc(top_p_min_buf_, sizeof(float) * batch_size, false));
top_p_reset_ids_buf_ =
reinterpret_cast<int32_t*>(allocator_->reMalloc(top_p_reset_ids_buf_, sizeof(int32_t) * batch_size, false));
topp_id_vals_buf_ = reinterpret_cast<int*>(
allocator_->reMalloc(topp_id_vals_buf_, sizeof(int) * batch_size * vocab_size_padded_, false));
topp_offset_buf_ =
reinterpret_cast<int*>(allocator_->reMalloc(topp_offset_buf_, sizeof(int) * (batch_size + 1), false));
begin_topp_offset_buf_ =
reinterpret_cast<int*>(allocator_->reMalloc(begin_topp_offset_buf_, sizeof(int) * (batch_size + 1), false));
is_allocate_buffer_ = true;
}
template<typename T>
void TopPSamplingLayer<T>::freeBuffer()
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
if (is_allocate_buffer_) {
allocator_->free((void**)(&sampling_workspace_));
allocator_->free((void**)(&topp_id_vals_buf_));
allocator_->free((void**)(&topp_offset_buf_));
allocator_->free((void**)(&begin_topp_offset_buf_));
allocator_->free((void**)(&runtime_top_k_buf_));
allocator_->free((void**)(&runtime_top_p_buf_));
allocator_->free((void**)(&initial_top_p_buf_));
allocator_->free((void**)(&top_p_decay_buf_));
allocator_->free((void**)(&top_p_min_buf_));
allocator_->free((void**)(&top_p_reset_ids_buf_));
}
BaseSamplingLayer<T>::freeBuffer();
is_allocate_buffer_ = false;
}
template<typename T>
void TopPSamplingLayer<T>::setup(const size_t batch_size, const size_t beam_width, TensorMap* runtime_args)
{
/**
* @brief Set up the sampling layer for given runtime arguments.
* runtime_args:
* \param runtime_top_k [1] or [batch_size] on cpu, optional.
* \param runtime_top_p [1] or [batch_size] on cpu, optional
* \param temperature [1] or [batch_size] on cpu, optional
* \param repetition_penalty [1] or [batch_size] on cpu, optional
* \param top_p_decay [batch_size] on gpu, float, optional
* \param top_p_min [batch_size] on gpu, float, optional
* \param top_p_reset_ids [batch_size] on gpu, uint32, optional
**/
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
BaseSamplingLayer<T>::setup(batch_size, beam_width, runtime_args);
const Tensor runtime_top_p = runtime_args->isExist("runtime_top_p") ? runtime_args->at("runtime_top_p") : Tensor();
const size_t runtime_top_p_size = runtime_top_p.size();
if (runtime_top_p_size == 0) {
std::fill_n(skip_decode_, batch_size, true);
return;
}
uint tmp_top_k = 0;
const Tensor runtime_top_k = runtime_args->isExist("runtime_top_k") ?
runtime_args->at("runtime_top_k") :
Tensor(MEMORY_CPU, TYPE_UINT32, {1}, &tmp_top_k);
const size_t runtime_top_k_size = runtime_top_k.size();
uint top_k = runtime_top_k.getVal<uint>();
float top_p = runtime_top_p.getVal<float>();
if (runtime_top_k_size > 1) {
FT_CHECK(runtime_top_k.size() == batch_size);
cudaH2Dcpy(runtime_top_k_buf_, runtime_top_k.getPtr<uint>(), batch_size);
}
if (runtime_top_p_size > 1) {
FT_CHECK(runtime_top_p.size() == batch_size);
cudaH2Dcpy(runtime_top_p_buf_, runtime_top_p.getPtr<float>(), batch_size);
}
dim3 block(::min((int)batch_size, 256));
dim3 grid(div_up((int)batch_size, (int)block.x));
const float* top_p_decay = runtime_args->getPtr<float>("top_p_decay", nullptr);
const float* top_p_min = runtime_args->getPtr<float>("top_p_min", nullptr);
const uint32_t* top_p_reset_ids = runtime_args->getPtr<uint32_t>("top_p_reset_ids", nullptr);
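    // One thread per batch slot clamps/broadcasts the per-request top-k/top-p values
    // and fills the decay, minimum and reset-id buffers on the device.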
hipLaunchKernelGGL(( set_topp_runtime_args), dim3(grid), dim3(block), 0, stream_, batch_size,
top_k,
runtime_top_k_buf_,
runtime_top_k_size,
top_p,
runtime_top_p_buf_,
runtime_top_p_size,
skip_decode_buf_,
initial_top_p_buf_,
top_p_decay_buf_,
top_p_decay,
top_p_min_buf_,
top_p_min,
top_p_reset_ids_buf_,
top_p_reset_ids);
sync_check_cuda_error();
cudaAutoCpy(skip_decode_, skip_decode_buf_, batch_size, stream_);
float* runtime_top_ps = new float[batch_size];
cudaAutoCpy(runtime_top_ps, runtime_top_p_buf_, batch_size, stream_);
runtime_max_top_p_ = *std::max_element(runtime_top_ps, runtime_top_ps + batch_size);
delete[] runtime_top_ps;
}
template<typename T>
void TopPSamplingLayer<T>::runSampling(TensorMap* output_tensors, TensorMap* input_tensors)
{
/**
* input_tensors:
* \param logits [local_batch_size, vocab_size_padded]
* \param embedding_bias [vocab_size_padded], optional
* \param step [1] on cpu
* \param max_input_length [1] on cpu
* \param input_lengths [local_batch_size], optional
* \param ite [1] on cpu
* output_tensors:
* \param output_ids [max_seq_len, batch_size]
* \param finished [local_batch_size], optional
* \param sequence_length [local_batch_size], optional
* \param cum_log_probs [batch_size], must be float*, optional
     *        The cumulative log probability of generated tokens.
* \param output_log_probs [local_batch_size], must be float*, optional
     *        Log probabilities of the sampled tokens at the current step.
**/
FT_LOG_DEBUG("%s start", __PRETTY_FUNCTION__);
FT_CHECK(input_tensors->size() >= 4);
FT_CHECK(output_tensors->size() >= 1);
const int batch_size = output_tensors->at("output_ids").shape[1];
const int local_batch_size = input_tensors->at("logits").shape[0];
const int step = input_tensors->at("step").getVal<int>();
const int ite = input_tensors->at("ite").getVal<int>();
// in case of skip any, the logit value is already copied and processed.
T* logits = !skip_any_ ? input_tensors->at("logits").getPtr<T>() : runtime_logits_buf_;
invokeTopPInitialize(
topp_id_vals_buf_, topp_offset_buf_, begin_topp_offset_buf_, local_batch_size, vocab_size_padded_, stream_);
sync_check_cuda_error();
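    // Normalize the logits with a softmax (no bias; finished sequences have their
    // probability mass moved to the end token) before the batched top-p sampling below.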
invokeAddBiasSoftMax(logits,
(T*)(nullptr),
input_tensors->at("end_id").getPtr<int>(),
output_tensors->at("finished", Tensor{MEMORY_GPU, TYPE_INVALID, {}, nullptr}).getPtr<bool>(),
local_batch_size,
vocab_size_padded_,
vocab_size_,
stream_);
sync_check_cuda_error();
float* cum_log_probs =
output_tensors->isExist("cum_log_probs") ? output_tensors->at("cum_log_probs").getPtr<float>() : nullptr;
float* output_log_probs =
output_tensors->isExist("output_log_probs") ? output_tensors->at("output_log_probs").getPtr<float>() : nullptr;
invokeBatchTopPSampling<T>(
sampling_workspace_,
sampling_workspace_size_,
cub_temp_storage_size_,
output_tensors->at("output_ids").getPtrWithOffset<int>(step * batch_size + ite * local_batch_size),
output_tensors->at("sequence_length", Tensor{MEMORY_GPU, TYPE_INVALID, {}, nullptr}).getPtr<int>(),
output_tensors->at("finished", Tensor{MEMORY_GPU, TYPE_INVALID, {}, nullptr}).getPtr<bool>(),
cum_log_probs,
output_log_probs,
logits,
topp_id_vals_buf_,
topp_offset_buf_,
begin_topp_offset_buf_,
curandstate_buf_ + ite * local_batch_size,
local_batch_size,
vocab_size_padded_,
input_tensors->at("end_id").getPtr<int>(),
runtime_max_top_p_,
runtime_top_p_buf_ + ite * local_batch_size,
stream_,
cuda_device_prop_,
skip_decode_buf_ + ite * local_batch_size);
sync_check_cuda_error();
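    // Decay each request's running top-p toward top_p_min after sampling, and reset it
    // to its initial value whenever the configured reset token was just generated.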
invokeComputeToppDecay(
runtime_top_p_buf_ + ite * local_batch_size,
initial_top_p_buf_ + ite * local_batch_size,
output_tensors->getPtrWithOffset<int>("output_ids", step * batch_size + ite * local_batch_size),
top_p_decay_buf_ + ite * local_batch_size,
top_p_min_buf_ + ite * local_batch_size,
top_p_reset_ids_buf_ + ite * local_batch_size,
local_batch_size,
stream_);
sync_check_cuda_error();
FT_LOG_DEBUG("%s stop", __PRETTY_FUNCTION__);
}
template<typename T>
TopPSamplingLayer<T>::TopPSamplingLayer(size_t max_batch_size,
size_t vocab_size,
size_t vocab_size_padded,
int end_id,
float top_p,
unsigned long long random_seed,
float temperature,
float len_penalty,
float repetition_penalty,
hipStream_t stream,
cublasMMWrapper* cublas_wrapper,
IAllocator* allocator,
bool is_free_buffer_after_forward,
hipDeviceProp_t* cuda_device_prop):
BaseSamplingLayer<T>(max_batch_size,
vocab_size,
vocab_size_padded,
end_id,
0,
top_p,
random_seed,
temperature,
len_penalty,
repetition_penalty,
stream,
cublas_wrapper,
allocator,
is_free_buffer_after_forward,
cuda_device_prop)
{
}
template<typename T>
TopPSamplingLayer<T>::TopPSamplingLayer(TopPSamplingLayer<T> const& top_p_sampling_layer):
BaseSamplingLayer<T>(top_p_sampling_layer)
{
}
template<typename T>
TopPSamplingLayer<T>::~TopPSamplingLayer()
{
freeBuffer();
}
template class TopPSamplingLayer<float>;
template class TopPSamplingLayer<half>;
} // namespace fastertransformer
| 7ae850143e058115f436f8c602f615a32ba2d1ce.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021, NAVER Corp. Authored by CLOVA.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <float.h>
#include "src/fastertransformer/kernels/reduce_kernel_utils.cuh"
#include "src/fastertransformer/kernels/sampling_topk_kernels.h"
#include "src/fastertransformer/kernels/sampling_topp_kernels.h"
#include "src/fastertransformer/layers/sampling_layers/TopPSamplingLayer.h"
#include "src/fastertransformer/utils/logger.h"
#include "src/fastertransformer/utils/memory_utils.h"
namespace fastertransformer {
static __global__ void set_topp_runtime_args(int batch_size,
uint top_k,
uint* top_ks,
int top_ks_size,
float top_p,
float* top_ps,
int top_ps_size,
bool* skip_decode,
float* initial_top_p_buf,
float* top_p_decay_buf,
const float* top_p_decay,
float* top_p_min_buf,
const float* top_p_min,
int32_t* top_p_reset_ids_buf,
const uint32_t* top_p_reset_ids)
{
/**
     * @brief Set up the runtime arguments for topp, broadcasting top_p to top_ps
and top_k to top_ks, copying top_p_decay/top_p_min/top_p_reset_ids
to internal buffers.
*
* \param batch_size [batch_size]
     * \param top_k [batch_size]
* \param top_ks [batch_size]
* \param top_ks_size [batch_size]
* \param top_p [batch_size]
* \param top_ps [batch_size]
* \param top_ps_size [batch_size]
* \param skip_decode [batch_size]
* \param initial_top_p_buf [batch_size]
* \param top_p_decay_buf [batch_size]
* \param top_p_decay [batch_size], optional, must between [0, 1]
* \param top_p_min_buf [batch_size]
* \param top_p_min [batch_size], optional, must between [0, 1]
* \param top_p_reset_ids_buf [batch_size]
* \param top_p_reset_ids [batch_size], optional
*
*/
int index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = index; i < batch_size; i += gridDim.x * blockDim.x) {
uint k = top_ks_size > 1 ? top_ks[i] : top_k;
float p = top_ps_size > 1 ? top_ps[i] : top_p;
if (k == 0 && p == 0.0f) {
// FT's topp implementation does not support topp = 0.0f, but it is equivalent to greedy search.
// So, we set the topk = 1 as an alternative solution.
k = 1;
}
top_ks[i] = k;
// Clip p value if it is out of range. range = [0.0, 1.0].
top_ps[i] = p < 0.0f ? 0.0f : (p > 1.0f ? 1.0f : p);
if (p < 0.0f || p > 1.0f) {
printf("[WARNING] topp (%f) is out of range ([0.0, 1.0f]) for token %d"
" clip to closest number %f.\n",
p,
i,
top_ps[i]);
}
skip_decode[i] = k > 0;
initial_top_p_buf[i] = top_ps[i];
top_p_decay_buf[i] = top_p_decay == nullptr ? 1.0f : top_p_decay[i];
if (top_p_decay_buf[i] > 1.0f || top_p_decay_buf[i] <= 0.0f) {
printf("[WARNING] top_p_decay_buf (%f) is out of range ([0.0, 1.0f]) for token %d,"
" change to 1.0f.\n",
top_p_decay_buf[i],
i);
top_p_decay_buf[i] = 1.0f;
}
top_p_min_buf[i] = top_p_min == nullptr ? 1e-6f : top_p_min[i]; // prevent topp becoming 0.0
if (top_p_min_buf[i] > 1.0f || top_p_min_buf[i] <= 0.0f) {
printf("[WARNING] top_p_min_buf (%f) is out of range ([0.0, 1.0f]) for token %d,"
" change to 0.5f.\n",
top_p_min_buf[i],
i);
top_p_min_buf[i] = 0.5f;
}
top_p_reset_ids_buf[i] = (int32_t)(top_p_reset_ids == nullptr ? -1 : top_p_reset_ids[i]);
}
}
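// Worked example of the rules above (values illustrative):
//   top_k = 0, top_p = 0.0  -> k forced to 1, skip_decode = true (falls back to the greedy/top-k path)
//   top_k = 0, top_p = 1.3  -> p clipped to 1.0, skip_decode = false (sampled by this top-p layer)
//   top_k = 4, top_p = 0.9  -> skip_decode = true (the top-k layer owns this sample)
//   top_p_decay = 1.5       -> outside (0, 1], reset to 1.0 (no decay)
//   top_p_min = 0.0         -> outside (0, 1], reset to 0.5
//   top_p_reset_ids == nullptr -> stored as -1, so top_p is never reset for that sample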
template<typename T>
void TopPSamplingLayer<T>::allocateBuffer()
{
FT_CHECK(false);
}
template<typename T>
void TopPSamplingLayer<T>::allocateBuffer(size_t batch_size, Tensor top_k, Tensor top_p)
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
BaseSamplingLayer<T>::allocateBuffer(batch_size, top_k, top_p);
invokeTopPSampling<T>(nullptr, // workspace
sampling_workspace_size_,
cub_temp_storage_size_,
nullptr, // output_ids
nullptr, // sequence_length
nullptr, // finished_buffer
nullptr, // cum_log_probs
nullptr, // output_log_probs
nullptr, // log_probs
topp_id_vals_buf_,
topp_offset_buf_,
begin_topp_offset_buf_,
curandstate_buf_,
batch_size,
vocab_size_padded_,
nullptr,
top_p.size() > 0 ? top_p.max<float>() : 0.0f,
stream_,
cuda_device_prop_,
skip_decode_buf_);
sampling_workspace_ = allocator_->reMalloc(sampling_workspace_, sampling_workspace_size_, true);
runtime_top_k_buf_ =
reinterpret_cast<uint*>(allocator_->reMalloc(runtime_top_k_buf_, sizeof(uint) * batch_size, false));
runtime_top_p_buf_ =
reinterpret_cast<float*>(allocator_->reMalloc(runtime_top_p_buf_, sizeof(float) * batch_size, false));
initial_top_p_buf_ =
reinterpret_cast<float*>(allocator_->reMalloc(initial_top_p_buf_, sizeof(float) * batch_size, false));
top_p_decay_buf_ =
reinterpret_cast<float*>(allocator_->reMalloc(top_p_decay_buf_, sizeof(float) * batch_size, false));
top_p_min_buf_ = reinterpret_cast<float*>(allocator_->reMalloc(top_p_min_buf_, sizeof(float) * batch_size, false));
top_p_reset_ids_buf_ =
reinterpret_cast<int32_t*>(allocator_->reMalloc(top_p_reset_ids_buf_, sizeof(int32_t) * batch_size, false));
topp_id_vals_buf_ = reinterpret_cast<int*>(
allocator_->reMalloc(topp_id_vals_buf_, sizeof(int) * batch_size * vocab_size_padded_, false));
topp_offset_buf_ =
reinterpret_cast<int*>(allocator_->reMalloc(topp_offset_buf_, sizeof(int) * (batch_size + 1), false));
begin_topp_offset_buf_ =
reinterpret_cast<int*>(allocator_->reMalloc(begin_topp_offset_buf_, sizeof(int) * (batch_size + 1), false));
is_allocate_buffer_ = true;
}
template<typename T>
void TopPSamplingLayer<T>::freeBuffer()
{
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
if (is_allocate_buffer_) {
allocator_->free((void**)(&sampling_workspace_));
allocator_->free((void**)(&topp_id_vals_buf_));
allocator_->free((void**)(&topp_offset_buf_));
allocator_->free((void**)(&begin_topp_offset_buf_));
allocator_->free((void**)(&runtime_top_k_buf_));
allocator_->free((void**)(&runtime_top_p_buf_));
allocator_->free((void**)(&initial_top_p_buf_));
allocator_->free((void**)(&top_p_decay_buf_));
allocator_->free((void**)(&top_p_min_buf_));
allocator_->free((void**)(&top_p_reset_ids_buf_));
}
BaseSamplingLayer<T>::freeBuffer();
is_allocate_buffer_ = false;
}
template<typename T>
void TopPSamplingLayer<T>::setup(const size_t batch_size, const size_t beam_width, TensorMap* runtime_args)
{
/**
* @brief Set up the sampling layer for given runtime arguments.
* runtime_args:
* \param runtime_top_k [1] or [batch_size] on cpu, optional.
* \param runtime_top_p [1] or [batch_size] on cpu, optional
* \param temperature [1] or [batch_size] on cpu, optional
* \param repetition_penalty [1] or [batch_size] on cpu, optional
* \param top_p_decay [batch_size] on gpu, float, optional
* \param top_p_min [batch_size] on gpu, float, optional
* \param top_p_reset_ids [batch_size] on gpu, uint32, optional
**/
FT_LOG_DEBUG(__PRETTY_FUNCTION__);
BaseSamplingLayer<T>::setup(batch_size, beam_width, runtime_args);
const Tensor runtime_top_p = runtime_args->isExist("runtime_top_p") ? runtime_args->at("runtime_top_p") : Tensor();
const size_t runtime_top_p_size = runtime_top_p.size();
if (runtime_top_p_size == 0) {
std::fill_n(skip_decode_, batch_size, true);
return;
}
uint tmp_top_k = 0;
const Tensor runtime_top_k = runtime_args->isExist("runtime_top_k") ?
runtime_args->at("runtime_top_k") :
Tensor(MEMORY_CPU, TYPE_UINT32, {1}, &tmp_top_k);
const size_t runtime_top_k_size = runtime_top_k.size();
uint top_k = runtime_top_k.getVal<uint>();
float top_p = runtime_top_p.getVal<float>();
if (runtime_top_k_size > 1) {
FT_CHECK(runtime_top_k.size() == batch_size);
cudaH2Dcpy(runtime_top_k_buf_, runtime_top_k.getPtr<uint>(), batch_size);
}
if (runtime_top_p_size > 1) {
FT_CHECK(runtime_top_p.size() == batch_size);
cudaH2Dcpy(runtime_top_p_buf_, runtime_top_p.getPtr<float>(), batch_size);
}
dim3 block(std::min((int)batch_size, 256));
dim3 grid(div_up((int)batch_size, (int)block.x));
const float* top_p_decay = runtime_args->getPtr<float>("top_p_decay", nullptr);
const float* top_p_min = runtime_args->getPtr<float>("top_p_min", nullptr);
const uint32_t* top_p_reset_ids = runtime_args->getPtr<uint32_t>("top_p_reset_ids", nullptr);
set_topp_runtime_args<<<grid, block, 0, stream_>>>(batch_size,
top_k,
runtime_top_k_buf_,
runtime_top_k_size,
top_p,
runtime_top_p_buf_,
runtime_top_p_size,
skip_decode_buf_,
initial_top_p_buf_,
top_p_decay_buf_,
top_p_decay,
top_p_min_buf_,
top_p_min,
top_p_reset_ids_buf_,
top_p_reset_ids);
sync_check_cuda_error();
cudaAutoCpy(skip_decode_, skip_decode_buf_, batch_size, stream_);
float* runtime_top_ps = new float[batch_size];
cudaAutoCpy(runtime_top_ps, runtime_top_p_buf_, batch_size, stream_);
runtime_max_top_p_ = *std::max_element(runtime_top_ps, runtime_top_ps + batch_size);
delete[] runtime_top_ps;
}
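// Minimal host-side sketch of how setup() is typically driven (illustrative only; it
// assumes the Tensor/TensorMap helpers already used in this file, e.g.
// Tensor(MEMORY_CPU, TYPE_FP32, {1}, &value) and TensorMap::insert()):
//
//   float top_p = 0.9f; // a single value is broadcast to the whole batch
//   TensorMap runtime_args;
//   runtime_args.insert("runtime_top_p", Tensor(MEMORY_CPU, TYPE_FP32, {1}, &top_p));
//   // optional: "runtime_top_k", "temperature", "repetition_penalty", plus the
//   // GPU-resident "top_p_decay" / "top_p_min" / "top_p_reset_ids" tensors
//   top_p_layer.setup(batch_size, /*beam_width=*/1, &runtime_args);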
template<typename T>
void TopPSamplingLayer<T>::runSampling(TensorMap* output_tensors, TensorMap* input_tensors)
{
/**
* input_tensors:
* \param logits [local_batch_size, vocab_size_padded]
* \param embedding_bias [vocab_size_padded], optional
* \param step [1] on cpu
* \param max_input_length [1] on cpu
* \param input_lengths [local_batch_size], optional
* \param ite [1] on cpu
* output_tensors:
* \param output_ids [max_seq_len, batch_size]
* \param finished [local_batch_size], optional
* \param sequence_length [local_batch_size], optional
* \param cum_log_probs [batch_size], must be float*, optional
* The cumulative log probability of generated tokens.
* \param output_log_probs [local_batch_size], must be float*, optional
* Log probs at the current step.
**/
FT_LOG_DEBUG("%s start", __PRETTY_FUNCTION__);
FT_CHECK(input_tensors->size() >= 4);
FT_CHECK(output_tensors->size() >= 1);
const int batch_size = output_tensors->at("output_ids").shape[1];
const int local_batch_size = input_tensors->at("logits").shape[0];
const int step = input_tensors->at("step").getVal<int>();
const int ite = input_tensors->at("ite").getVal<int>();
// in case of skip any, the logit value is already copied and processed.
T* logits = !skip_any_ ? input_tensors->at("logits").getPtr<T>() : runtime_logits_buf_;
invokeTopPInitialize(
topp_id_vals_buf_, topp_offset_buf_, begin_topp_offset_buf_, local_batch_size, vocab_size_padded_, stream_);
sync_check_cuda_error();
invokeAddBiasSoftMax(logits,
(T*)(nullptr),
input_tensors->at("end_id").getPtr<int>(),
output_tensors->at("finished", Tensor{MEMORY_GPU, TYPE_INVALID, {}, nullptr}).getPtr<bool>(),
local_batch_size,
vocab_size_padded_,
vocab_size_,
stream_);
sync_check_cuda_error();
float* cum_log_probs =
output_tensors->isExist("cum_log_probs") ? output_tensors->at("cum_log_probs").getPtr<float>() : nullptr;
float* output_log_probs =
output_tensors->isExist("output_log_probs") ? output_tensors->at("output_log_probs").getPtr<float>() : nullptr;
invokeBatchTopPSampling<T>(
sampling_workspace_,
sampling_workspace_size_,
cub_temp_storage_size_,
output_tensors->at("output_ids").getPtrWithOffset<int>(step * batch_size + ite * local_batch_size),
output_tensors->at("sequence_length", Tensor{MEMORY_GPU, TYPE_INVALID, {}, nullptr}).getPtr<int>(),
output_tensors->at("finished", Tensor{MEMORY_GPU, TYPE_INVALID, {}, nullptr}).getPtr<bool>(),
cum_log_probs,
output_log_probs,
logits,
topp_id_vals_buf_,
topp_offset_buf_,
begin_topp_offset_buf_,
curandstate_buf_ + ite * local_batch_size,
local_batch_size,
vocab_size_padded_,
input_tensors->at("end_id").getPtr<int>(),
runtime_max_top_p_,
runtime_top_p_buf_ + ite * local_batch_size,
stream_,
cuda_device_prop_,
skip_decode_buf_ + ite * local_batch_size);
sync_check_cuda_error();
invokeComputeToppDecay(
runtime_top_p_buf_ + ite * local_batch_size,
initial_top_p_buf_ + ite * local_batch_size,
output_tensors->getPtrWithOffset<int>("output_ids", step * batch_size + ite * local_batch_size),
top_p_decay_buf_ + ite * local_batch_size,
top_p_min_buf_ + ite * local_batch_size,
top_p_reset_ids_buf_ + ite * local_batch_size,
local_batch_size,
stream_);
sync_check_cuda_error();
FT_LOG_DEBUG("%s stop", __PRETTY_FUNCTION__);
}
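// Layout note for the calls above (derived from the indexing in runSampling):
// "output_ids" is stored as [max_seq_len, batch_size], so the token written for
// micro-batch `ite`, local sample `b` at decoding step `step` lives at flat offset
//   step * batch_size + ite * local_batch_size + b
// e.g. step = 3, batch_size = 8, ite = 0, b = 2 gives offset 26.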
template<typename T>
TopPSamplingLayer<T>::TopPSamplingLayer(size_t max_batch_size,
size_t vocab_size,
size_t vocab_size_padded,
int end_id,
float top_p,
unsigned long long random_seed,
float temperature,
float len_penalty,
float repetition_penalty,
cudaStream_t stream,
cublasMMWrapper* cublas_wrapper,
IAllocator* allocator,
bool is_free_buffer_after_forward,
cudaDeviceProp* cuda_device_prop):
BaseSamplingLayer<T>(max_batch_size,
vocab_size,
vocab_size_padded,
end_id,
0,
top_p,
random_seed,
temperature,
len_penalty,
repetition_penalty,
stream,
cublas_wrapper,
allocator,
is_free_buffer_after_forward,
cuda_device_prop)
{
}
template<typename T>
TopPSamplingLayer<T>::TopPSamplingLayer(TopPSamplingLayer<T> const& top_p_sampling_layer):
BaseSamplingLayer<T>(top_p_sampling_layer)
{
}
template<typename T>
TopPSamplingLayer<T>::~TopPSamplingLayer()
{
freeBuffer();
}
template class TopPSamplingLayer<float>;
template class TopPSamplingLayer<half>;
} // namespace fastertransformer
|
7601f1a609ac00f9d11fe99d599f695fd214616f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.hpp"
__global__ void s2g_gpu_scatter_kernel(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT KERNEL CODE HERE
int x = threadIdx.x + blockDim.x * blockIdx.x;
if (x < len){
uint32_t intermediate = outInvariant(in[x]);
for (int y = 0; y < len; ++y){
uint32_t temp = outDependent(intermediate, x, y);
atomicAdd(&(out[y]), temp);
}
}
}
static void s2g_cpu_scatter(uint32_t *in, uint32_t *out, int len) {
for (int inIdx = 0; inIdx < len; ++inIdx) {
uint32_t intermediate = outInvariant(in[inIdx]);
for (int outIdx = 0; outIdx < len; ++outIdx) {
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
}
static void s2g_gpu_scatter(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT CODE HERE
//vecAdd<<<ceil(inputLength/256.0), 256>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
hipLaunchKernelGGL(( s2g_gpu_scatter_kernel), dim3(ceil(len/1024.0)), dim3(1024), 0, 0, in, out, len);
}
static int eval(int inputLength) {
uint32_t *deviceInput = nullptr;
uint32_t *deviceOutput= nullptr;
const std::string conf_info =
std::string("scatter[len:") + std::to_string(inputLength) + "]";
INFO("Running " << conf_info);
auto hostInput = generate_input(inputLength);
const size_t byteCount = inputLength * sizeof(uint32_t);
timer_start("Allocating GPU memory.");
THROW_IF_ERROR(hipMalloc((void **)&deviceInput, byteCount));
THROW_IF_ERROR(hipMalloc((void **)&deviceOutput, byteCount));
timer_stop();
timer_start("Copying input memory to the GPU.");
THROW_IF_ERROR(hipMemcpy(deviceInput, hostInput.data(), byteCount,
hipMemcpyHostToDevice));
THROW_IF_ERROR(hipMemset(deviceOutput, 0, byteCount));
timer_stop();
//////////////////////////////////////////
// GPU Scatter Computation
//////////////////////////////////////////
timer_start( "Performing GPU Scatter computation");
s2g_gpu_scatter(deviceInput, deviceOutput, inputLength);
timer_stop();
std::vector<uint32_t> hostOutput(inputLength);
timer_start( "Copying output memory to the CPU");
THROW_IF_ERROR(hipMemcpy(hostOutput.data(), deviceOutput, byteCount,
hipMemcpyDeviceToHost));
timer_stop();
auto expected = compute_output(hostInput, inputLength);
verify(expected, hostOutput);
hipFree(deviceInput);
hipFree(deviceOutput);
return 0;
}
TEST_CASE("Scatter", "[scatter]") {
SECTION("[inputSize:1024]") {
eval(1024);
}
SECTION("[inputSize:2048]") {
eval(2048);
}
SECTION("[inputSize:2047]") {
eval(2047);
}
SECTION("[inputSize:2049]") {
eval(2049);
}
SECTION("[inputSize:9101]") {
eval(9101);
}
SECTION("[inputSize:9910]") {
eval(9910);
}
SECTION("[inputSize:8192]") {
eval(8192);
}
SECTION("[inputSize:8193]") {
eval(8193);
}
SECTION("[inputSize:8191]") {
eval(8191);
}
SECTION("[inputSize:16191]") {
eval(16191);
}
}
| 7601f1a609ac00f9d11fe99d599f695fd214616f.cu | #include "helper.hpp"
__global__ void s2g_gpu_scatter_kernel(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT KERNEL CODE HERE
int x = threadIdx.x + blockDim.x * blockIdx.x;
if (x < len){
uint32_t intermediate = outInvariant(in[x]);
for (int y = 0; y < len; ++y){
uint32_t temp = outDependent(intermediate, x, y);
atomicAdd(&(out[y]), temp);
}
}
}
static void s2g_cpu_scatter(uint32_t *in, uint32_t *out, int len) {
for (int inIdx = 0; inIdx < len; ++inIdx) {
uint32_t intermediate = outInvariant(in[inIdx]);
for (int outIdx = 0; outIdx < len; ++outIdx) {
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
}
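// For contrast with the scatter kernel above, a gather-style variant is sketched below
// (illustrative only; it is not called by eval()). Each thread owns one *output* element
// and accumulates over all inputs, so the atomicAdd disappears. It reuses the same
// outInvariant()/outDependent() device helpers from helper.hpp as the scatter kernel.
__global__ void s2g_gpu_gather_kernel(uint32_t *in, uint32_t *out, int len) {
  int outIdx = threadIdx.x + blockDim.x * blockIdx.x;
  if (outIdx < len) {
    uint32_t acc = 0;
    for (int inIdx = 0; inIdx < len; ++inIdx) {
      // outInvariant is recomputed per input: extra FLOPs traded for no atomics
      acc += outDependent(outInvariant(in[inIdx]), inIdx, outIdx);
    }
    out[outIdx] += acc;
  }
}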
static void s2g_gpu_scatter(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT CODE HERE
//vecAdd<<<ceil(inputLength/256.0), 256>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
s2g_gpu_scatter_kernel<<<ceil(len/1024.0), 1024>>>(in, out, len);
}
static int eval(int inputLength) {
uint32_t *deviceInput = nullptr;
uint32_t *deviceOutput= nullptr;
const std::string conf_info =
std::string("scatter[len:") + std::to_string(inputLength) + "]";
INFO("Running " << conf_info);
auto hostInput = generate_input(inputLength);
const size_t byteCount = inputLength * sizeof(uint32_t);
timer_start("Allocating GPU memory.");
THROW_IF_ERROR(cudaMalloc((void **)&deviceInput, byteCount));
THROW_IF_ERROR(cudaMalloc((void **)&deviceOutput, byteCount));
timer_stop();
timer_start("Copying input memory to the GPU.");
THROW_IF_ERROR(cudaMemcpy(deviceInput, hostInput.data(), byteCount,
cudaMemcpyHostToDevice));
THROW_IF_ERROR(cudaMemset(deviceOutput, 0, byteCount));
timer_stop();
//////////////////////////////////////////
// GPU Scatter Computation
//////////////////////////////////////////
timer_start( "Performing GPU Scatter computation");
s2g_gpu_scatter(deviceInput, deviceOutput, inputLength);
timer_stop();
std::vector<uint32_t> hostOutput(inputLength);
timer_start( "Copying output memory to the CPU");
THROW_IF_ERROR(cudaMemcpy(hostOutput.data(), deviceOutput, byteCount,
cudaMemcpyDeviceToHost));
timer_stop();
auto expected = compute_output(hostInput, inputLength);
verify(expected, hostOutput);
cudaFree(deviceInput);
cudaFree(deviceOutput);
return 0;
}
TEST_CASE("Scatter", "[scatter]") {
SECTION("[inputSize:1024]") {
eval(1024);
}
SECTION("[inputSize:2048]") {
eval(2048);
}
SECTION("[inputSize:2047]") {
eval(2047);
}
SECTION("[inputSize:2049]") {
eval(2049);
}
SECTION("[inputSize:9101]") {
eval(9101);
}
SECTION("[inputSize:9910]") {
eval(9910);
}
SECTION("[inputSize:8192]") {
eval(8192);
}
SECTION("[inputSize:8193]") {
eval(8193);
}
SECTION("[inputSize:8191]") {
eval(8191);
}
SECTION("[inputSize:16191]") {
eval(16191);
}
}
|
75c884605a23ab6294018fb9f0155c1fa6b8245f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wb.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include <iostream>
#include <sys/time.h>
using namespace cv;
using namespace std;
#define MASK_SIZE 5
#define sigma 0.9
#define MASK_RADIUS MASK_SIZE/ 2
#define TILE_WIDTH 16
#define SIZE (TILE_WIDTH + MASK_SIZE - 1)
#define PI 3.141592653589793238
__constant__ float M[MASK_SIZE * MASK_SIZE];
__global__ void convolution2D (float * I,float * P,
int channels, int width, int height)
{
__shared__ float N_ds[SIZE][SIZE];
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
for(int k=0;k<channels;k++){
int dest = ty * TILE_WIDTH + tx;
int destX = dest % SIZE;
int destY = dest / SIZE;
int srcY = by * TILE_WIDTH + destY - MASK_RADIUS;
int srcX = bx * TILE_WIDTH + destX - MASK_RADIUS;
int src = (srcY * width + srcX) * channels + k;
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0.0;
__syncthreads();
dest = ty * TILE_WIDTH + tx + TILE_WIDTH * TILE_WIDTH;
destY = dest / SIZE;
destX = dest % SIZE;
srcY = by * TILE_WIDTH + destY - MASK_RADIUS;
srcX = bx * TILE_WIDTH + destX - MASK_RADIUS;
src = (srcY * width + srcX) * channels + k;
if (destY < SIZE) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0.0;
}
__syncthreads();
float accum = 0;
for (int y = 0; y < MASK_SIZE; ++y)
for (int x = 0; x < MASK_SIZE; ++x)
accum += N_ds[ty + y][tx + x] * M[y * MASK_SIZE + x];
int y = by * TILE_WIDTH + ty;
int x = bx * TILE_WIDTH + tx;
if (y < height && x < width)
P[(y * width + x) * channels +k ] = min(max(accum, 0.0), 1.0);
__syncthreads();
}
}
int main (int argc, char * argv[ ])
{
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char * inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
float * hostInputImageData;
float * hostOutputImageData;
float hostMaskData[MASK_SIZE*MASK_SIZE];
float * deviceInputImageData;
float * deviceOutputImageData;
float * deviceMaskData;
clock_t begin = clock();
clock_t begin_imread = clock();
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputImage = wbImport(inputImageFile);
printf("Image Dimension: %40d X %d \n",wbImage_getWidth(inputImage),wbImage_getHeight(inputImage));
//IplImage *img = cvLoadImage("input0.ppm",CV_LOAD_IMAGE_GRAYSCALE);
printf("Image Loading time: %40.6lf secs\n",(double)(clock()-begin_imread)/(double)(CLOCKS_PER_SEC));
maskRows = MASK_SIZE;
maskColumns = MASK_SIZE;
float mask[MASK_SIZE][MASK_SIZE];
float x,y;
clock_t begin_gauss = clock();
for(int i=0;i<MASK_SIZE;i++){
for(int j=0;j<MASK_SIZE;j++){
x = i - (maskRows/2);
y = j - (maskColumns/2);
mask[i][j] = -1.0 * (2 * sigma * sigma - (x * x + y * y)) /(2.0 * PI * sigma * sigma * sigma * sigma) * exp(-(x * x + y * y) / (2.0 * sigma * sigma));
hostMaskData[i*MASK_SIZE+j] = mask[i][j];
}
}
clock_t end_gauss = clock();
printf("Log Filter execution time: %40.6lf secs\n",(double)(end_gauss-begin_gauss)/(double)(CLOCKS_PER_SEC));
/*for(int i=0;i<MASK_SIZE;i++){
for(int j=0;j<MASK_SIZE;j++){
printf("%.1f ",hostMaskData[i*MASK_SIZE+j]);
}
cout<<endl;
}
*/
//////////////////////////////
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
// Mat A = Mat(imageHeight, imageWidth, CV_32FC3 ,wbImage_getData(inputImage));
// A.convertTo(A, CV_8UC3, 255.0);
// imwrite("Wind.jpg",A);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);//(float *)img->imageData;
hostOutputImageData = wbImage_getData(outputImage);
clock_t begin_gpu_comp = clock();
clock_t begin_gpu_malloc = clock();
hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float));
printf("GPU memory allocation time: %40.6lf secs\n",(double)(clock()-begin_gpu_malloc)/(double)(CLOCKS_PER_SEC));
clock_t begin_copy_htod = clock();
hipMemcpyToSymbol(M, hostMaskData, sizeof(int) * MASK_SIZE * MASK_SIZE);//
hipMemcpy(deviceInputImageData, hostInputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(deviceMaskData, hostMaskData,maskRows * maskColumns * sizeof(float),hipMemcpyHostToDevice);
printf("Copy Time HOST to Device: %40.6lf secs\n",(double)(clock()-begin_copy_htod)/(double)(CLOCKS_PER_SEC));
hipEvent_t start,stop;
float tot;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
clock_t begin_comp = clock();
dim3 dimGrid(ceil((float) imageWidth / TILE_WIDTH),ceil((float) imageHeight / TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
hipLaunchKernelGGL(( convolution2D), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInputImageData, /*deviceMaskData,*/deviceOutputImageData, imageChannels, imageWidth, imageHeight);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tot,start,stop);
printf("Computation time on GPU: %40.6lf secs\n",(double)(clock()-begin_comp)/(double)(CLOCKS_PER_SEC));
clock_t begin_copy_dtoh = clock();
hipMemcpy(hostOutputImageData, deviceOutputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),hipMemcpyDeviceToHost);
printf("Copy time Device to HOST: %40.6lf secs\n",(double)(clock()-begin_copy_dtoh)/(double)(CLOCKS_PER_SEC));
printf("Total time: %40.6lf secs\n",(double)(clock()-begin_gpu_comp)/(double)(CLOCKS_PER_SEC));
Mat B = Mat(imageHeight, imageWidth, CV_32FC3, wbImage_getData(outputImage));
B.convertTo(B, CV_8UC3, 255.0);
imwrite("OUTPUT.jpg",B);
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
hipFree(deviceMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
cvWaitKey(0);
return 0;
}
| 75c884605a23ab6294018fb9f0155c1fa6b8245f.cu | #include "wb.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include <iostream>
#include <sys/time.h>
using namespace cv;
using namespace std;
#define MASK_SIZE 5
#define sigma 0.9
#define MASK_RADIUS MASK_SIZE/ 2
#define TILE_WIDTH 16
#define SIZE (TILE_WIDTH + MASK_SIZE - 1)
#define PI 3.141592653589793238
__constant__ float M[MASK_SIZE * MASK_SIZE];
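// Tiled shared-memory convolution: each block computes a TILE_WIDTH x TILE_WIDTH output
// tile but must stage a (TILE_WIDTH + MASK_SIZE - 1)^2 input patch (tile plus halo) in
// N_ds. Because the block only has TILE_WIDTH*TILE_WIDTH threads, the patch is loaded in
// two passes below (the two `dest = ...` sections), with out-of-image samples zero-padded;
// the mask weights are read from constant memory M[].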
__global__ void convolution2D (float * I,float * P,
int channels, int width, int height)
{
__shared__ float N_ds[SIZE][SIZE];
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
for(int k=0;k<channels;k++){
int dest = ty * TILE_WIDTH + tx;
int destX = dest % SIZE;
int destY = dest / SIZE;
int srcY = by * TILE_WIDTH + destY - MASK_RADIUS;
int srcX = bx * TILE_WIDTH + destX - MASK_RADIUS;
int src = (srcY * width + srcX) * channels + k;
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0.0;
__syncthreads();
dest = ty * TILE_WIDTH + tx + TILE_WIDTH * TILE_WIDTH;
destY = dest / SIZE;
destX = dest % SIZE;
srcY = by * TILE_WIDTH + destY - MASK_RADIUS;
srcX = bx * TILE_WIDTH + destX - MASK_RADIUS;
src = (srcY * width + srcX) * channels + k;
if (destY < SIZE) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0.0;
}
__syncthreads();
float accum = 0;
for (int y = 0; y < MASK_SIZE; ++y)
for (int x = 0; x < MASK_SIZE; ++x)
accum += N_ds[ty + y][tx + x] * M[y * MASK_SIZE + x];
int y = by * TILE_WIDTH + ty;
int x = bx * TILE_WIDTH + tx;
if (y < height && x < width)
P[(y * width + x) * channels +k ] = min(max(accum, 0.0), 1.0);
__syncthreads();
}
}
int main (int argc, char * argv[ ])
{
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char * inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
float * hostInputImageData;
float * hostOutputImageData;
float hostMaskData[MASK_SIZE*MASK_SIZE];
float * deviceInputImageData;
float * deviceOutputImageData;
float * deviceMaskData;
clock_t begin = clock();
clock_t begin_imread = clock();
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputImage = wbImport(inputImageFile);
printf("Image Dimension: %40d X %d \n",wbImage_getWidth(inputImage),wbImage_getHeight(inputImage));
//IplImage *img = cvLoadImage("input0.ppm",CV_LOAD_IMAGE_GRAYSCALE);
printf("Image Loading time: %40.6lf secs\n",(double)(clock()-begin_imread)/(double)(CLOCKS_PER_SEC));
maskRows = MASK_SIZE;
maskColumns = MASK_SIZE;
float mask[MASK_SIZE][MASK_SIZE];
float x,y;
clock_t begin_gauss = clock();
for(int i=0;i<MASK_SIZE;i++){
for(int j=0;j<MASK_SIZE;j++){
x = i - (maskRows/2);
y = j - (maskColumns/2);
mask[i][j] = -1.0 * (2 * sigma * sigma - (x * x + y * y)) /(2.0 * PI * sigma * sigma * sigma * sigma) * exp(-(x * x + y * y) / (2.0 * sigma * sigma));
hostMaskData[i*MASK_SIZE+j] = mask[i][j];
}
}
clock_t end_gauss = clock();
printf("Log Filter execution time: %40.6lf secs\n",(double)(end_gauss-begin_gauss)/(double)(CLOCKS_PER_SEC));
/*for(int i=0;i<MASK_SIZE;i++){
for(int j=0;j<MASK_SIZE;j++){
printf("%.1f ",hostMaskData[i*MASK_SIZE+j]);
}
cout<<endl;
}
*/
//////////////////////////////
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
// Mat A = Mat(imageHeight, imageWidth, CV_32FC3 ,wbImage_getData(inputImage));
// A.convertTo(A, CV_8UC3, 255.0);
// imwrite("Wind.jpg",A);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);//(float *)img->imageData;
hostOutputImageData = wbImage_getData(outputImage);
clock_t begin_gpu_comp = clock();
clock_t begin_gpu_malloc = clock();
cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float));
printf("GPU memory allocation time: %40.6lf secs\n",(double)(clock()-begin_gpu_malloc)/(double)(CLOCKS_PER_SEC));
clock_t begin_copy_htod = clock();
cudaMemcpyToSymbol(M, hostMaskData, sizeof(int) * MASK_SIZE * MASK_SIZE);//
cudaMemcpy(deviceInputImageData, hostInputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData, hostMaskData,maskRows * maskColumns * sizeof(float),cudaMemcpyHostToDevice);
printf("Copy Time HOST to Device: %40.6lf secs\n",(double)(clock()-begin_copy_htod)/(double)(CLOCKS_PER_SEC));
cudaEvent_t start,stop;
float tot;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
clock_t begin_comp = clock();
dim3 dimGrid(ceil((float) imageWidth / TILE_WIDTH),ceil((float) imageHeight / TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
convolution2D<<<dimGrid, dimBlock>>>(deviceInputImageData, /*deviceMaskData,*/deviceOutputImageData, imageChannels, imageWidth, imageHeight);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tot,start,stop);
printf("Computation time on GPU: %40.6lf secs\n",(double)(clock()-begin_comp)/(double)(CLOCKS_PER_SEC));
clock_t begin_copy_dtoh = clock();
cudaMemcpy(hostOutputImageData, deviceOutputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),cudaMemcpyDeviceToHost);
printf("Copy time Device to HOST: %40.6lf secs\n",(double)(clock()-begin_copy_dtoh)/(double)(CLOCKS_PER_SEC));
printf("Total time: %40.6lf secs\n",(double)(clock()-begin_gpu_comp)/(double)(CLOCKS_PER_SEC));
Mat B = Mat(imageHeight, imageWidth, CV_32FC3, wbImage_getData(outputImage));
B.convertTo(B, CV_8UC3, 255.0);
imwrite("OUTPUT.jpg",B);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
cvWaitKey(0);
return 0;
}
|
591a94bd280419469f0b865c6b9d161fab962e8f.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#define MAX_TRAVEL_DIST 9999999.99f
#define ENABLE_AA 1
#define ENABLE_MOTION_BLUR 0
#define ENABLE_DOF 1
#define APERTURE_RADIUS 0.1f
#define FOCALLEN_LENGTH 1.1f
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
// TODO: IMPLEMENT THIS FUNCTION
// Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
ray r;
r.origin = eye;
glm::vec3 image_x_direction=glm::cross(view,up);
glm::vec3 image_y_direction=-up;
glm::vec3 image_center=eye+view;
float px=float(x);
float py=float(y);
if(ENABLE_DOF)
{
thrust::default_random_engine rng(hash(time+1.0f));
thrust::uniform_real_distribution<float> u01(-1.0f,1.0f);
r.origin=r.origin+u01(rng)*image_x_direction*APERTURE_RADIUS;
r.origin=r.origin+u01(rng)*image_y_direction*APERTURE_RADIUS;
image_center=eye+FOCALLEN_LENGTH*view;
}
//http://en.wikipedia.org/wiki/Supersampling for Anti Aliasing
if(ENABLE_AA)
{
thrust::default_random_engine rng(hash((time+1.0f)*(px+2.0f)*(py+3.0f)));
thrust::uniform_real_distribution<float> u01(-1.5f,1.5f);
px=px+u01(rng);
py=py+u01(rng);
}
float image_x=((float)px-(float)resolution.x/2)/((float)resolution.x/2);
float image_y=((float)py-(float)resolution.y/2)/((float)resolution.y/2);
float angle_x=fov.x;
float angle_y=fov.y;
glm::vec3 image_pos=image_center+image_x*glm::length(view)*tan(angle_x)*glm::normalize(image_x_direction)+image_y*glm::length(view)*tan(angle_y)*glm::normalize(image_y_direction);
glm::vec3 ray_direction=glm::normalize(image_pos-eye);
r.direction=ray_direction;
r.travel_dist=0;
return r;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
// LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
// Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
__global__ void InitRays(ray* activeRays, glm::vec2 resolution,float time, cameraData cam)
{
int x=blockIdx.x*blockDim.x+threadIdx.x;
int y=blockIdx.y*blockDim.y+threadIdx.y;
int index=x+y*resolution.x;
if(x<=resolution.x && y<=resolution.y)
{
ray newRay=raycastFromCameraKernel(resolution,time,x,y,cam.position,cam.view,cam.up,cam.fov*(float)PI/180.0f);
newRay.color=glm::vec3(1.0f);
newRay.is_Active=true;
newRay.index=index;
activeRays[index]=newRay;
}
}
__global__ void average_image(glm::vec2 resolution,float time,glm::vec3* current_image,glm::vec3* final_image)
{
int x=blockIdx.x*blockDim.x+threadIdx.x;
int y=blockIdx.y*blockDim.y+threadIdx.y;
int index=x+y*resolution.x;
if(x<=resolution.x && y<=resolution.y)
{
//final_image[index]=current_image[index]/(float)time+final_image[index]*(time-1)/(float)time;
final_image[index]=current_image[index]/(float)time+final_image[index]*(time-1)/(float)time;
glm::clamp(final_image[index],0.0f,1.0f);
}
}
// TODO: IMPLEMENT THIS FUNCTION
// Core raytracer kernel
__global__ void raytraceRay(ray* activeRays,int N,int current_depth,glm::vec2 resolution, float time, cameraData cam, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials){
int index = blockIdx.x*blockDim.x+threadIdx.x;
if(index<N){
//test for direction
//ray newRay = raycastFromCameraKernel(resolution,time,x,y,cam.position,cam.view,cam.up,cam.fov*(float)PI/180.0f);
//colors[index]=newRay.direction;
if(activeRays[index].is_Active)
{
glm::vec3 intersectionPoint, normal;
glm::vec3 temp_intersectionPoint,temp_normal;
float travelDist(MAX_TRAVEL_DIST);
float d;
int MaterialID,ObjectID;
for(int i=0;i<numberOfGeoms;i++)
{
if(geoms[i].type==SPHERE)
d=sphereIntersectionTest(geoms[i],activeRays[index],temp_intersectionPoint,temp_normal);
else if(geoms[i].type==CUBE)
d=boxIntersectionTest(geoms[i],activeRays[index],temp_intersectionPoint,temp_normal);
if(d>0.0f && d<travelDist)
{
travelDist=d;
intersectionPoint=temp_intersectionPoint;
normal=temp_normal;
MaterialID=geoms[i].materialid;
ObjectID=i;
}
}
if(travelDist<0.0f||travelDist>=MAX_TRAVEL_DIST)
{
activeRays[index].is_Active=false;
return;
}
material M=materials[MaterialID];
activeRays[index].travel_dist+=travelDist;
if(M.emittance>0.001f)
{
colors[activeRays[index].index]=exp(-0.05f*activeRays[index].travel_dist)*M.emittance*M.color*activeRays[index].color;
activeRays[index].is_Active=false;
return;
}
else
{
float randSeed=((float)time+1.0f)*((float)index+2.0f)*((float)current_depth+3.0f);
//int flag;
//flag=calculateBSDF(randSeed, activeRays[index], geoms,ObjectID,intersectionPoint,normal,M);
/*if(flag==0)
{
activeRays[index].color=glm::vec3(1.0f,0.0f,0.0f);
}
else if(flag==1)
{
activeRays[index].color=glm::vec3(0.0f,1.0f,0.0f);
}
else
{
activeRays[index].color=glm::vec3(0.0f,0.0f,1.0f);
}*/
calculateBSDF(randSeed, activeRays[index], geoms,ObjectID,intersectionPoint,normal,M);
return;
}
}
else
{
return;
}
}
}
//helper function for stream compact
struct ray_isActive
{
__host__ __device__ bool operator()(const ray Ray)
{
return !Ray.is_Active;
}
};
// TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){
// send image to GPU
glm::vec3* cudaimage = NULL;
hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);
//send current image to GPU
glm::vec3* current_cudaimage = NULL;
hipMalloc((void**)¤t_cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
hipMemcpy( current_cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);
//send rays to GPU
ray* activeRays=NULL;
int Num_rays=renderCam->resolution.x*renderCam->resolution.y;
hipMalloc((void**)&activeRays,Num_rays*sizeof(ray));
// package geometry and materials and sent to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
//motion blur
if(ENABLE_MOTION_BLUR)
{
if(i==6)
{
newStaticGeom.translation.x-=(float)iterations/3000;
newStaticGeom.translation.y+=(float)iterations/3000;
glm::mat4 new_transform=utilityCore::buildTransformationMatrix(newStaticGeom.translation,newStaticGeom.rotation,newStaticGeom.scale);
newStaticGeom.transform=utilityCore::glmMat4ToCudaMat4(new_transform);
newStaticGeom.inverseTransform=utilityCore::glmMat4ToCudaMat4(glm::inverse(new_transform));
}
}
geomList[i] = newStaticGeom;
}
//send geometry
staticGeom* cudageoms = NULL;
hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);
//send materials
material* cudamaterials=NULL;
hipMalloc((void**)&cudamaterials,numberOfMaterials*sizeof(material));
hipMemcpy(cudamaterials,materials,numberOfMaterials*sizeof(material),hipMemcpyHostToDevice);
// package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool stream_compact=false;
int traceDepth=10;
// set up crucial magic
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
hipLaunchKernelGGL(( InitRays), dim3(fullBlocksPerGrid),dim3(threadsPerBlock), 0, 0, activeRays, renderCam->resolution,(float)iterations,cam);
// kernel launches
int blockSize=64;
for(int i=0;i<traceDepth;i++)
{
if(stream_compact)
{
thrust::device_ptr<ray> current_rays(activeRays);
thrust::device_ptr<ray> new_rays=thrust::remove_if(current_rays,current_rays+Num_rays,ray_isActive());
Num_rays=new_rays.get()-current_rays.get();
//printf("%d\n",Num_rays);
if(Num_rays<1.0f)
break;
}
hipLaunchKernelGGL(( raytraceRay), dim3(ceil((float)Num_rays/blockSize)),dim3(blockSize), 0, 0, activeRays,Num_rays,i,renderCam->resolution, (float)iterations, cam, current_cudaimage, cudageoms, numberOfGeoms,cudamaterials,numberOfMaterials);
}
hipLaunchKernelGGL(( average_image), dim3(fullBlocksPerGrid),dim3(threadsPerBlock), 0, 0, renderCam->resolution,(float)iterations,current_cudaimage,cudaimage);
hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage);
// retrieve image from GPU
hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost);
// free up stuff, or else we'll leak memory like a madman
hipFree( cudaimage );
hipFree( cudageoms );
hipFree(current_cudaimage);
hipFree(activeRays);
delete[] geomList;
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("Kernel failed!");
}
| 591a94bd280419469f0b865c6b9d161fab962e8f.cu | // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#define MAX_TRAVEL_DIST 9999999.99f
#define ENABLE_AA 1
#define ENABLE_MOTION_BLUR 0
#define ENABLE_DOF 1
#define APERTURE_RADIUS 0.1f
#define FOCALLEN_LENGTH 1.1f
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
// TODO: IMPLEMENT THIS FUNCTION
// Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
ray r;
r.origin = eye;
glm::vec3 image_x_direction=glm::cross(view,up);
glm::vec3 image_y_direction=-up;
glm::vec3 image_center=eye+view;
float px=float(x);
float py=float(y);
if(ENABLE_DOF)
{
thrust::default_random_engine rng(hash(time+1.0f));
thrust::uniform_real_distribution<float> u01(-1.0f,1.0f);
r.origin=r.origin+u01(rng)*image_x_direction*APERTURE_RADIUS;
r.origin=r.origin+u01(rng)*image_y_direction*APERTURE_RADIUS;
image_center=eye+FOCALLEN_LENGTH*view;
}
//http://en.wikipedia.org/wiki/Supersampling for Anti Aliasing
if(ENABLE_AA)
{
thrust::default_random_engine rng(hash((time+1.0f)*(px+2.0f)*(py+3.0f)));
thrust::uniform_real_distribution<float> u01(-1.5f,1.5f);
px=px+u01(rng);
py=py+u01(rng);
}
float image_x=((float)px-(float)resolution.x/2)/((float)resolution.x/2);
float image_y=((float)py-(float)resolution.y/2)/((float)resolution.y/2);
float angle_x=fov.x;
float angle_y=fov.y;
glm::vec3 image_pos=image_center+image_x*glm::length(view)*tan(angle_x)*glm::normalize(image_x_direction)+image_y*glm::length(view)*tan(angle_y)*glm::normalize(image_y_direction);
glm::vec3 ray_direction=glm::normalize(image_pos-eye);
r.direction=ray_direction;
r.travel_dist=0;
return r;
}
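// Quick worked example of the pixel-to-image-plane mapping above (resolution.x = 800,
// jitter from AA/DOF ignored): px = 0 -> image_x = -1, px = 400 -> 0, px = 799 -> +0.9975.
// image_x is then scaled by |view| * tan(fov.x) along the camera's right vector, and
// image_y along -up (image_y_direction = -up, so +y runs down the image).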
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
// LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
// Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
__global__ void InitRays(ray* activeRays, glm::vec2 resolution,float time, cameraData cam)
{
int x=blockIdx.x*blockDim.x+threadIdx.x;
int y=blockIdx.y*blockDim.y+threadIdx.y;
int index=x+y*resolution.x;
if(x<=resolution.x && y<=resolution.y)
{
ray newRay=raycastFromCameraKernel(resolution,time,x,y,cam.position,cam.view,cam.up,cam.fov*(float)PI/180.0f);
newRay.color=glm::vec3(1.0f);
newRay.is_Active=true;
newRay.index=index;
activeRays[index]=newRay;
}
}
__global__ void average_image(glm::vec2 resolution,float time,glm::vec3* current_image,glm::vec3* final_image)
{
int x=blockIdx.x*blockDim.x+threadIdx.x;
int y=blockIdx.y*blockDim.y+threadIdx.y;
int index=x+y*resolution.x;
if(x<=resolution.x && y<=resolution.y)
{
//final_image[index]=current_image[index]/(float)time+final_image[index]*(time-1)/(float)time;
final_image[index]=current_image[index]/(float)time+final_image[index]*(time-1)/(float)time;
glm::clamp(final_image[index],0.0f,1.0f);
}
}
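// Progressive averaging: with `time` being the 1-based iteration count, the update
//   final = current / time + final * (time - 1) / time
// keeps final_image equal to the mean of every per-iteration image so far; e.g. at
// iteration 4 the new sample contributes 1/4 and the previous running mean 3/4.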
// TODO: IMPLEMENT THIS FUNCTION
// Core raytracer kernel
__global__ void raytraceRay(ray* activeRays,int N,int current_depth,glm::vec2 resolution, float time, cameraData cam, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials){
int index = blockIdx.x*blockDim.x+threadIdx.x;
if(index<N){
//test for direction
//ray newRay = raycastFromCameraKernel(resolution,time,x,y,cam.position,cam.view,cam.up,cam.fov*(float)PI/180.0f);
//colors[index]=newRay.direction;
if(activeRays[index].is_Active)
{
glm::vec3 intersectionPoint, normal;
glm::vec3 temp_intersectionPoint,temp_normal;
float travelDist(MAX_TRAVEL_DIST);
float d;
int MaterialID,ObjectID;
for(int i=0;i<numberOfGeoms;i++)
{
if(geoms[i].type==SPHERE)
d=sphereIntersectionTest(geoms[i],activeRays[index],temp_intersectionPoint,temp_normal);
else if(geoms[i].type==CUBE)
d=boxIntersectionTest(geoms[i],activeRays[index],temp_intersectionPoint,temp_normal);
if(d>0.0f && d<travelDist)
{
travelDist=d;
intersectionPoint=temp_intersectionPoint;
normal=temp_normal;
MaterialID=geoms[i].materialid;
ObjectID=i;
}
}
if(travelDist<0.0f||travelDist>=MAX_TRAVEL_DIST)
{
activeRays[index].is_Active=false;
return;
}
material M=materials[MaterialID];
activeRays[index].travel_dist+=travelDist;
if(M.emittance>0.001f)
{
colors[activeRays[index].index]=exp(-0.05f*activeRays[index].travel_dist)*M.emittance*M.color*activeRays[index].color;
activeRays[index].is_Active=false;
return;
}
else
{
float randSeed=((float)time+1.0f)*((float)index+2.0f)*((float)current_depth+3.0f);
//int flag;
//flag=calculateBSDF(randSeed, activeRays[index], geoms,ObjectID,intersectionPoint,normal,M);
/*if(flag==0)
{
activeRays[index].color=glm::vec3(1.0f,0.0f,0.0f);
}
else if(flag==1)
{
activeRays[index].color=glm::vec3(0.0f,1.0f,0.0f);
}
else
{
activeRays[index].color=glm::vec3(0.0f,0.0f,1.0f);
}*/
calculateBSDF(randSeed, activeRays[index], geoms,ObjectID,intersectionPoint,normal,M);
return;
}
}
else
{
return;
}
}
}
//helper function for stream compact
struct ray_isActive
{
__host__ __device__ bool operator()(const ray Ray)
{
return !Ray.is_Active;
}
};
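// Used with thrust::remove_if in cudaRaytraceCore below: the predicate returns true for
// rays that should be removed (already terminated), so after compaction the surviving
// prefix of activeRays holds only rays that are still bouncing.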
// TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){
// send image to GPU
glm::vec3* cudaimage = NULL;
cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);
//send current image to GPU
glm::vec3* current_cudaimage = NULL;
cudaMalloc((void**)¤t_cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
cudaMemcpy( current_cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);
//send rays to GPU
ray* activeRays=NULL;
int Num_rays=renderCam->resolution.x*renderCam->resolution.y;
cudaMalloc((void**)&activeRays,Num_rays*sizeof(ray));
// package geometry and materials and sent to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
//motion blur
if(ENABLE_MOTION_BLUR)
{
if(i==6)
{
newStaticGeom.translation.x-=(float)iterations/3000;
newStaticGeom.translation.y+=(float)iterations/3000;
glm::mat4 new_transform=utilityCore::buildTransformationMatrix(newStaticGeom.translation,newStaticGeom.rotation,newStaticGeom.scale);
newStaticGeom.transform=utilityCore::glmMat4ToCudaMat4(new_transform);
newStaticGeom.inverseTransform=utilityCore::glmMat4ToCudaMat4(glm::inverse(new_transform));
}
}
geomList[i] = newStaticGeom;
}
//send geometry
staticGeom* cudageoms = NULL;
cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
//send materials
material* cudamaterials=NULL;
cudaMalloc((void**)&cudamaterials,numberOfMaterials*sizeof(material));
cudaMemcpy(cudamaterials,materials,numberOfMaterials*sizeof(material),cudaMemcpyHostToDevice);
// package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool stream_compact=false;
int traceDepth=10;
// set up crucial magic
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
InitRays<<<fullBlocksPerGrid,threadsPerBlock>>>(activeRays, renderCam->resolution,(float)iterations,cam);
// kernel launches
int blockSize=64;
for(int i=0;i<traceDepth;i++)
{
if(stream_compact)
{
thrust::device_ptr<ray> current_rays(activeRays);
thrust::device_ptr<ray> new_rays=thrust::remove_if(current_rays,current_rays+Num_rays,ray_isActive());
Num_rays=new_rays.get()-current_rays.get();
//printf("%d\n",Num_rays);
if(Num_rays<1.0f)
break;
}
raytraceRay<<<ceil((float)Num_rays/blockSize),blockSize>>>(activeRays,Num_rays,i,renderCam->resolution, (float)iterations, cam, current_cudaimage, cudageoms, numberOfGeoms,cudamaterials,numberOfMaterials);
}
average_image<<<fullBlocksPerGrid,threadsPerBlock>>>(renderCam->resolution,(float)iterations,current_cudaimage,cudaimage);
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage);
// retrieve image from GPU
cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
// free up stuff, or else we'll leak memory like a madman
cudaFree( cudaimage );
cudaFree( cudageoms );
cudaFree(current_cudaimage);
cudaFree(activeRays);
delete[] geomList;
// make certain the kernel has completed
cudaDeviceSynchronize();
checkCUDAError("Kernel failed!");
}
|
fb494db606ac9289f18c30ec559182ebe6816ce6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaOverlay.h"
// cudaOverlay
template<typename T>
__global__ void gpuOverlay( T* input, int inputWidth, T* output, int outputWidth, int outputHeight, int x0, int y0 )
{
const int input_x = blockIdx.x * blockDim.x + threadIdx.x;
const int input_y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = input_x + x0;
const int y = input_y + y0;
if( x >= outputWidth || y >= outputHeight )
return;
output[y * outputWidth + x] = input[input_y * inputWidth + input_x];
}
hipError_t cudaOverlay( void* input, size_t inputWidth, size_t inputHeight,
void* output, size_t outputWidth, size_t outputHeight,
imageFormat format, int x, int y )
{
if( !input || !output || inputWidth == 0 || inputHeight == 0 || outputWidth == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
if( x < 0 || y < 0 || x >= outputWidth || y >= outputHeight )
return hipErrorInvalidValue;
if( !imageFormatIsRGB(format) && !imageFormatIsBGR(format) && !imageFormatIsGray(format) )
return hipErrorInvalidValue;
int overlayWidth = inputWidth;
int overlayHeight = inputHeight;
if( x + overlayWidth >= outputWidth )
overlayWidth = outputWidth - x;
if( y + overlayHeight >= outputHeight )
overlayHeight = outputHeight - y;
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(overlayWidth,blockDim.x), iDivUp(overlayHeight,blockDim.y));
#define launch_overlay(type) \
hipLaunchKernelGGL(( gpuOverlay<type>), dim3(gridDim), dim3(blockDim), 0, 0, (type*)input, inputWidth, (type*)output, outputWidth, outputHeight, x, y)
if( format == IMAGE_RGB8 || format == IMAGE_BGR8 )
launch_overlay(uchar3);
else if( format == IMAGE_RGBA8 || format == IMAGE_BGRA8 )
launch_overlay(uchar4);
else if( format == IMAGE_RGB32F || format == IMAGE_BGR32F )
launch_overlay(float3);
else if( format == IMAGE_RGBA32F || format == IMAGE_BGRA32F )
launch_overlay(float4);
else if( format == IMAGE_GRAY8 )
launch_overlay(uint8_t);
else if( format == IMAGE_GRAY32F )
launch_overlay(float);
return hipGetLastError();
}
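// Usage sketch (illustrative only; `smallImg` and `bigImg` are hypothetical device
// buffers already allocated in the chosen format):
//   hipError_t err = cudaOverlay(smallImg, 128, 64, // overlay source, 128x64
//                                bigImg, 1280, 720, // destination image, 1280x720
//                                IMAGE_RGB8, 600, 300); // top-left corner pasted at (600, 300)
// Overlays that would extend past the right/bottom edge are clipped by the code above.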
//----------------------------------------------------------------------------
template<typename T>
__global__ void gpuRectFill( T* input, T* output, int width, int height,
float4* rects, int numRects, float4 color )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
T px = input[ y * width + x ];
const float fx = x;
const float fy = y;
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
for( int nr=0; nr < numRects; nr++ )
{
const float4 r = rects[nr];
if( fy >= r.y && fy <= r.w && fx >= r.x && fx <= r.z )
{
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
}
}
output[y * width + x] = px;
}
template<typename T>
__global__ void gpuRectFillBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
const int box_x = blockIdx.x * blockDim.x + threadIdx.x;
const int box_y = blockIdx.y * blockDim.y + threadIdx.y;
if( box_x >= boxWidth || box_y >= boxHeight )
return;
const int x = box_x + x0;
const int y = box_y + y0;
if( x >= imgWidth || y >= imgHeight )
return;
T px = input[ y * imgWidth + x ];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
output[y * imgWidth + x] = px;
}
template<typename T>
hipError_t launchRectFill( T* input, T* output, size_t width, size_t height, float4* rects, int numRects, const float4& color )
{
if( !input || !output || width == 0 || height == 0 || !rects || numRects == 0 )
return hipErrorInvalidValue;
// if input and output are the same image, then we can use the faster method
// which draws 1 box per kernel, but doesn't copy pixels that aren't inside boxes
if( input == output )
{
for( int n=0; n < numRects; n++ )
{
const int boxWidth = (int)(rects[n].z - rects[n].x);
const int boxHeight = (int)(rects[n].w - rects[n].y);
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
hipLaunchKernelGGL(( gpuRectFillBox<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, (int)rects[n].x, (int)rects[n].y, boxWidth, boxHeight, color);
}
}
else
{
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
hipLaunchKernelGGL(( gpuRectFill<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, rects, numRects, color);
}
return hipGetLastError();
}
// cudaRectFill
hipError_t cudaRectFill( void* input, void* output, size_t width, size_t height, imageFormat format, float4* rects, int numRects, const float4& color )
{
if( !input || !output || width == 0 || height == 0 || !rects || numRects == 0 )
return hipErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
if( format == IMAGE_RGB8 )
return launchRectFill<uchar3>((uchar3*)input, (uchar3*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGBA8 )
return launchRectFill<uchar4>((uchar4*)input, (uchar4*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGB32F )
return launchRectFill<float3>((float3*)input, (float3*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGBA32F )
return launchRectFill<float4>((float4*)input, (float4*)output, width, height, rects, numRects, color);
else
return hipErrorInvalidValue;
}
| fb494db606ac9289f18c30ec559182ebe6816ce6.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaOverlay.h"
// cudaOverlay
template<typename T>
__global__ void gpuOverlay( T* input, int inputWidth, T* output, int outputWidth, int outputHeight, int x0, int y0 )
{
const int input_x = blockIdx.x * blockDim.x + threadIdx.x;
const int input_y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = input_x + x0;
const int y = input_y + y0;
if( x >= outputWidth || y >= outputHeight )
return;
output[y * outputWidth + x] = input[input_y * inputWidth + input_x];
}
cudaError_t cudaOverlay( void* input, size_t inputWidth, size_t inputHeight,
void* output, size_t outputWidth, size_t outputHeight,
imageFormat format, int x, int y )
{
if( !input || !output || inputWidth == 0 || inputHeight == 0 || outputWidth == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
if( x < 0 || y < 0 || x >= outputWidth || y >= outputHeight )
return cudaErrorInvalidValue;
if( !imageFormatIsRGB(format) && !imageFormatIsBGR(format) && !imageFormatIsGray(format) )
return cudaErrorInvalidValue;
int overlayWidth = inputWidth;
int overlayHeight = inputHeight;
if( x + overlayWidth >= outputWidth )
overlayWidth = outputWidth - x;
if( y + overlayHeight >= outputHeight )
overlayHeight = outputHeight - y;
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(overlayWidth,blockDim.x), iDivUp(overlayHeight,blockDim.y));
#define launch_overlay(type) \
gpuOverlay<type><<<gridDim, blockDim>>>((type*)input, inputWidth, (type*)output, outputWidth, outputHeight, x, y)
if( format == IMAGE_RGB8 || format == IMAGE_BGR8 )
launch_overlay(uchar3);
else if( format == IMAGE_RGBA8 || format == IMAGE_BGRA8 )
launch_overlay(uchar4);
else if( format == IMAGE_RGB32F || format == IMAGE_BGR32F )
launch_overlay(float3);
else if( format == IMAGE_RGBA32F || format == IMAGE_BGRA32F )
launch_overlay(float4);
else if( format == IMAGE_GRAY8 )
launch_overlay(uint8_t);
else if( format == IMAGE_GRAY32F )
launch_overlay(float);
return cudaGetLastError();
}
//----------------------------------------------------------------------------
template<typename T>
__global__ void gpuRectFill( T* input, T* output, int width, int height,
float4* rects, int numRects, float4 color )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
T px = input[ y * width + x ];
const float fx = x;
const float fy = y;
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
for( int nr=0; nr < numRects; nr++ )
{
const float4 r = rects[nr];
if( fy >= r.y && fy <= r.w && fx >= r.x && fx <= r.z )
{
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
}
}
output[y * width + x] = px;
}
template<typename T>
__global__ void gpuRectFillBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
const int box_x = blockIdx.x * blockDim.x + threadIdx.x;
const int box_y = blockIdx.y * blockDim.y + threadIdx.y;
if( box_x >= boxWidth || box_y >= boxHeight )
return;
const int x = box_x + x0;
const int y = box_y + y0;
if( x >= imgWidth || y >= imgHeight )
return;
T px = input[ y * imgWidth + x ];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
output[y * imgWidth + x] = px;
}
template<typename T>
cudaError_t launchRectFill( T* input, T* output, size_t width, size_t height, float4* rects, int numRects, const float4& color )
{
if( !input || !output || width == 0 || height == 0 || !rects || numRects == 0 )
return cudaErrorInvalidValue;
// if input and output are the same image, then we can use the faster method
// which draws 1 box per kernel, but doesn't copy pixels that aren't inside boxes
if( input == output )
{
for( int n=0; n < numRects; n++ )
{
const int boxWidth = (int)(rects[n].z - rects[n].x);
const int boxHeight = (int)(rects[n].w - rects[n].y);
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
gpuRectFillBox<T><<<gridDim, blockDim>>>(input, output, width, height, (int)rects[n].x, (int)rects[n].y, boxWidth, boxHeight, color);
}
}
else
{
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
gpuRectFill<T><<<gridDim, blockDim>>>(input, output, width, height, rects, numRects, color);
}
return cudaGetLastError();
}
// cudaRectFill
cudaError_t cudaRectFill( void* input, void* output, size_t width, size_t height, imageFormat format, float4* rects, int numRects, const float4& color )
{
if( !input || !output || width == 0 || height == 0 || !rects || numRects == 0 )
return cudaErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
if( format == IMAGE_RGB8 )
return launchRectFill<uchar3>((uchar3*)input, (uchar3*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGBA8 )
return launchRectFill<uchar4>((uchar4*)input, (uchar4*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGB32F )
return launchRectFill<float3>((float3*)input, (float3*)output, width, height, rects, numRects, color);
else if( format == IMAGE_RGBA32F )
return launchRectFill<float4>((float4*)input, (float4*)output, width, height, rects, numRects, color);
else
return cudaErrorInvalidValue;
}
|
67c37874b2723b232fb2e6a1749559358b1dced6.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
extern "C" __global__ void mandelbrot_ker(float* lattice, float* mandelbrot_graph, int max_iters, float upper_bound_squared, int lattice_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < lattice_size * lattice_size)
{
int i = tid % lattice_size;
int j = lattice_size - 1 - (tid / lattice_size);
float c_re = lattice[i];
float c_im = lattice[j];
float z_re = 0.0f;
float z_im = 0.0f;
mandelbrot_graph[tid] = 1;
for(int k = 0; k < max_iters; k++)
{
float temp;
temp = z_re * z_re - z_im * z_im + c_re;
z_im = 2 * z_re * z_im + c_im;
z_re = temp;
if ( (z_re * z_re + z_im * z_im) > upper_bound_squared)
{
mandelbrot_graph[tid] = 0;
break;
}
}
}
return;
}
extern "C" void launch_mandelbrot(float* lattice, float* mandelbrot_graph, int max_iters, float upper_bound, int lattice_size)
{
int num_bytes_lattice = sizeof(float) * lattice_size;
int num_bytes_graph = sizeof(float) * lattice_size * lattice_size;
float * d_lattice;
float * d_mandelbrot_graph;
hipMalloc((float**) &d_lattice, num_bytes_lattice);
hipMalloc((float**) &d_mandelbrot_graph, num_bytes_graph);
hipMemcpy(d_lattice, lattice, num_bytes_lattice, hipMemcpyHostToDevice);
int grid_size = (int) ceil(((double) lattice_size * lattice_size) / ((double) 32));
hipLaunchKernelGGL(( mandelbrot_ker) , dim3(grid_size), dim3(32), 0, 0, d_lattice, d_mandelbrot_graph, max_iters, upper_bound * upper_bound, lattice_size);
hipMemcpy(mandelbrot_graph, d_mandelbrot_graph, num_bytes_graph, hipMemcpyDeviceToHost);
hipFree(d_lattice);
hipFree(d_mandelbrot_graph);
}
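/*
 * Usage sketch added for clarity -- not part of the original file and guarded
 * out of normal builds (the guard name is arbitrary). launch_mandelbrot()
 * reuses the same 1-D lattice of coordinates for both axes and writes a
 * lattice_size x lattice_size map of 0/1 values into mandelbrot_graph.
 */
#ifdef MANDELBROT_USAGE_EXAMPLE
int main(void)
{
	const int n = 512;
	float *lattice = (float*) malloc(sizeof(float) * n);
	float *graph = (float*) malloc(sizeof(float) * n * n);
	for (int i = 0; i < n; i++)
		lattice[i] = -2.0f + 4.0f * (float) i / (float) (n - 1); /* coordinates in [-2, 2] */
	launch_mandelbrot(lattice, graph, 256, 2.0f, n);
	free(lattice);
	free(graph);
	return 0;
}
#endif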
| 67c37874b2723b232fb2e6a1749559358b1dced6.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
extern "C" __global__ void mandelbrot_ker(float* lattice, float* mandelbrot_graph, int max_iters, float upper_bound_squared, int lattice_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < lattice_size * lattice_size)
{
int i = tid % lattice_size;
int j = lattice_size - 1 - (tid / lattice_size);
float c_re = lattice[i];
float c_im = lattice[j];
float z_re = 0.0f;
float z_im = 0.0f;
mandelbrot_graph[tid] = 1;
for(int k = 0; k < max_iters; k++)
{
float temp;
temp = z_re * z_re - z_im * z_im + c_re;
z_im = 2 * z_re * z_im + c_im;
z_re = temp;
if ( (z_re * z_re + z_im * z_im) > upper_bound_squared)
{
mandelbrot_graph[tid] = 0;
break;
}
}
}
return;
}
extern "C" void launch_mandelbrot(float* lattice, float* mandelbrot_graph, int max_iters, float upper_bound, int lattice_size)
{
int num_bytes_lattice = sizeof(float) * lattice_size;
int num_bytes_graph = sizeof(float) * lattice_size * lattice_size;
float * d_lattice;
float * d_mandelbrot_graph;
cudaMalloc((float**) &d_lattice, num_bytes_lattice);
cudaMalloc((float**) &d_mandelbrot_graph, num_bytes_graph);
cudaMemcpy(d_lattice, lattice, num_bytes_lattice, cudaMemcpyHostToDevice);
int grid_size = (int) ceil(((double) lattice_size * lattice_size) / ((double) 32));
mandelbrot_ker <<<grid_size, 32>>> (d_lattice, d_mandelbrot_graph, max_iters, upper_bound * upper_bound, lattice_size);
cudaMemcpy(mandelbrot_graph, d_mandelbrot_graph, num_bytes_graph, cudaMemcpyDeviceToHost);
cudaFree(d_lattice);
cudaFree(d_mandelbrot_graph);
}
|
5fc2bd4bda33dd4fffdfc14d63a42be315325fe0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <vector>
#include <cuml/cluster/dbscan.hpp>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/metrics/metrics.hpp>
#include "linalg/cublas_wrappers.h"
#include "linalg/transpose.h"
#include "ml_utils.h"
#include "test_utils.h"
#include <cuml/common/logger.hpp>
#include "common/device_buffer.hpp"
namespace ML {
using namespace MLCommon;
using namespace Datasets;
using namespace Metrics;
using namespace std;
template <typename T, typename IdxT>
struct DbscanInputs {
IdxT n_row;
IdxT n_col;
IdxT n_centers;
T cluster_std;
T eps;
int min_pts;
size_t max_bytes_per_batch;
unsigned long long int seed;
};
template <typename T, typename IdxT>
::std::ostream &operator<<(::std::ostream &os,
const DbscanInputs<T, IdxT> &dims) {
return os;
}
template <typename T, typename IdxT>
class DbscanTest : public ::testing::TestWithParam<DbscanInputs<T, IdxT>> {
protected:
void basicTest() {
cumlHandle handle;
params = ::testing::TestWithParam<DbscanInputs<T, IdxT>>::GetParam();
device_buffer<T> out(handle.getDeviceAllocator(), handle.getStream(),
params.n_row * params.n_col);
device_buffer<IdxT> l(handle.getDeviceAllocator(), handle.getStream(),
params.n_row);
make_blobs(handle, out.data(), l.data(), params.n_row, params.n_col,
params.n_centers, nullptr, nullptr, params.cluster_std, true,
-10.0f, 10.0f, 1234ULL);
allocate(labels, params.n_row);
allocate(labels_ref, params.n_row);
MLCommon::copy(labels_ref, l.data(), params.n_row, handle.getStream());
CUDA_CHECK(hipStreamSynchronize(handle.getStream()));
dbscanFit(handle, out.data(), params.n_row, params.n_col, params.eps,
params.min_pts, labels, params.max_bytes_per_batch);
CUDA_CHECK(hipStreamSynchronize(handle.getStream()));
score = adjustedRandIndex(handle, labels_ref, labels, params.n_row);
if (score < 1.0) {
auto str = arr2Str(labels_ref, 25, "labels_ref", handle.getStream());
CUML_LOG_DEBUG("y: %s", str.c_str());
str = arr2Str(labels, 25, "labels", handle.getStream());
CUML_LOG_DEBUG("y_hat: %s", str.c_str());
CUML_LOG_DEBUG("Score = %lf", score);
}
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(labels_ref));
}
protected:
DbscanInputs<T, IdxT> params;
IdxT *labels, *labels_ref;
double score;
};
const std::vector<DbscanInputs<float, int>> inputsf2 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{50000, 16, 5l, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};
const std::vector<DbscanInputs<float, int64_t>> inputsf3 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{50000, 16, 5l, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};
const std::vector<DbscanInputs<double, int>> inputsd2 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{100, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};
const std::vector<DbscanInputs<double, int64_t>> inputsd3 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{100, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};
typedef DbscanTest<float, int> DbscanTestF_Int;
TEST_P(DbscanTestF_Int, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<float, int64_t> DbscanTestF_Int64;
TEST_P(DbscanTestF_Int64, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<double, int> DbscanTestD_Int;
TEST_P(DbscanTestD_Int, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<double, int64_t> DbscanTestD_Int64;
TEST_P(DbscanTestD_Int64, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int64,
::testing::ValuesIn(inputsf3));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int,
::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int64,
::testing::ValuesIn(inputsd3));
template <typename T>
struct DBScan2DArrayInputs {
const T *points;
const int *out;
size_t n_row;
  // n_out allows comparing fewer labels than we have inputs
// (some output labels can be ambiguous)
size_t n_out;
T eps;
int min_pts;
};
template <typename T>
class Dbscan2DSimple : public ::testing::TestWithParam<DBScan2DArrayInputs<T>> {
protected:
void basicTest() {
cumlHandle handle;
params = ::testing::TestWithParam<DBScan2DArrayInputs<T>>::GetParam();
allocate(inputs, params.n_row * 2);
allocate(labels, params.n_row);
allocate(labels_ref, params.n_out);
MLCommon::copy(inputs, params.points, params.n_row * 2, handle.getStream());
MLCommon::copy(labels_ref, params.out, params.n_out, handle.getStream());
CUDA_CHECK(hipStreamSynchronize(handle.getStream()));
dbscanFit(handle, inputs, (int)params.n_row, 2, params.eps, params.min_pts,
labels);
CUDA_CHECK(hipStreamSynchronize(handle.getStream()));
score = adjustedRandIndex(handle, labels_ref, labels, (int)params.n_out);
if (score < 1.0) {
auto str =
arr2Str(labels_ref, params.n_out, "labels_ref", handle.getStream());
CUML_LOG_DEBUG("y: %s", str.c_str());
str = arr2Str(labels, params.n_row, "labels", handle.getStream());
CUML_LOG_DEBUG("y_hat: %s", str.c_str());
CUML_LOG_DEBUG("Score = %lf", score);
}
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(hipFree(labels_ref));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(inputs));
}
protected:
DBScan2DArrayInputs<T> params;
int *labels, *labels_ref;
T *inputs;
double score;
};
// The input looks like a latin cross or a star with a chain:
// .
// . . . . .
// .
// There is 1 core-point (intersection of the bars)
// and the two points at the far right are not reachable from it
// So there should be one cluster (the plus/star on the left)
// and two noise points
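// Added note (not in the original test): with eps = 1.1 and min_pts = 4, only
// the intersection point (1,0) has enough neighbours within eps to be a core
// point (its four unit-distance neighbours; every other point has at most
// two), so (0,0), (1,1), (1,-1) and (2,0) join its cluster as border points,
// while (3,0) and (4,0) are farther than eps from any core point and stay
// noise (-1), matching test2d1_l below.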
const std::vector<float> test2d1_f = {0, 0, 1, 0, 1, 1, 1,
-1, 2, 0, 3, 0, 4, 0};
const std::vector<double> test2d1_d(test2d1_f.begin(), test2d1_f.end());
const std::vector<int> test2d1_l = {0, 0, 0, 0, 0, -1, -1};
// The input looks like a long two-barred (orthodox) cross or
// two stars next to each other:
// . .
// . . . . . .
// . .
// There are 2 core-points but they are not reachable from each other
// So there should be two clusters, both in the form of a plus/star
const std::vector<float> test2d2_f = {0, 0, 1, 0, 1, 1, 1, -1, 2, 0,
3, 0, 4, 0, 4, 1, 4, -1, 5, 0};
const std::vector<double> test2d2_d(test2d2_f.begin(), test2d2_f.end());
const std::vector<int> test2d2_l = {0, 0, 0, 0, 0, 1, 1, 1, 1, 1};
// The input looks like a two-barred (orthodox) cross or
// two stars sharing a link:
// . .
// . . . . .
// . .
// There are 2 core-points but they are not reachable from each other
// So there should be two clusters.
// However, the link that is shared between the stars
// actually has an ambiguous label (to the best of my knowledge)
// as it will depend on the order in which we process the core-points.
// Note that there are 9 input points, but only 8 labels for this reason
const std::vector<float> test2d3_f = {
0, 0, 1, 0, 1, 1, 1, -1, 3, 0, 3, 1, 3, -1, 4, 0, 2, 0,
};
const std::vector<double> test2d3_d(test2d3_f.begin(), test2d3_f.end());
const std::vector<int> test2d3_l = {0, 0, 0, 0, 1, 1, 1, 1};
const std::vector<DBScan2DArrayInputs<float>> inputs2d_f = {
{test2d1_f.data(), test2d1_l.data(), test2d1_f.size() / 2, test2d1_l.size(),
1.1f, 4},
{test2d2_f.data(), test2d2_l.data(), test2d2_f.size() / 2, test2d2_l.size(),
1.1f, 4},
{test2d3_f.data(), test2d3_l.data(), test2d3_f.size() / 2, test2d3_l.size(),
1.1f, 4},
};
const std::vector<DBScan2DArrayInputs<double>> inputs2d_d = {
{test2d1_d.data(), test2d1_l.data(), test2d1_d.size() / 2, test2d1_l.size(),
1.1, 4},
{test2d2_d.data(), test2d2_l.data(), test2d2_d.size() / 2, test2d2_l.size(),
1.1, 4},
{test2d3_d.data(), test2d3_l.data(), test2d3_d.size() / 2, test2d3_l.size(),
1.1, 4},
};
typedef Dbscan2DSimple<float> Dbscan2DSimple_F;
TEST_P(Dbscan2DSimple_F, Result) { ASSERT_TRUE(score == 1.0); }
typedef Dbscan2DSimple<double> Dbscan2DSimple_D;
TEST_P(Dbscan2DSimple_D, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(DbscanTests, Dbscan2DSimple_F,
::testing::ValuesIn(inputs2d_f));
INSTANTIATE_TEST_CASE_P(DbscanTests, Dbscan2DSimple_D,
::testing::ValuesIn(inputs2d_d));
} // end namespace ML
| 5fc2bd4bda33dd4fffdfc14d63a42be315325fe0.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <vector>
#include <cuml/cluster/dbscan.hpp>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/metrics/metrics.hpp>
#include "linalg/cublas_wrappers.h"
#include "linalg/transpose.h"
#include "ml_utils.h"
#include "test_utils.h"
#include <cuml/common/logger.hpp>
#include "common/device_buffer.hpp"
namespace ML {
using namespace MLCommon;
using namespace Datasets;
using namespace Metrics;
using namespace std;
template <typename T, typename IdxT>
struct DbscanInputs {
IdxT n_row;
IdxT n_col;
IdxT n_centers;
T cluster_std;
T eps;
int min_pts;
size_t max_bytes_per_batch;
unsigned long long int seed;
};
template <typename T, typename IdxT>
::std::ostream &operator<<(::std::ostream &os,
const DbscanInputs<T, IdxT> &dims) {
return os;
}
template <typename T, typename IdxT>
class DbscanTest : public ::testing::TestWithParam<DbscanInputs<T, IdxT>> {
protected:
void basicTest() {
cumlHandle handle;
params = ::testing::TestWithParam<DbscanInputs<T, IdxT>>::GetParam();
device_buffer<T> out(handle.getDeviceAllocator(), handle.getStream(),
params.n_row * params.n_col);
device_buffer<IdxT> l(handle.getDeviceAllocator(), handle.getStream(),
params.n_row);
make_blobs(handle, out.data(), l.data(), params.n_row, params.n_col,
params.n_centers, nullptr, nullptr, params.cluster_std, true,
-10.0f, 10.0f, 1234ULL);
allocate(labels, params.n_row);
allocate(labels_ref, params.n_row);
MLCommon::copy(labels_ref, l.data(), params.n_row, handle.getStream());
CUDA_CHECK(cudaStreamSynchronize(handle.getStream()));
dbscanFit(handle, out.data(), params.n_row, params.n_col, params.eps,
params.min_pts, labels, params.max_bytes_per_batch);
CUDA_CHECK(cudaStreamSynchronize(handle.getStream()));
score = adjustedRandIndex(handle, labels_ref, labels, params.n_row);
if (score < 1.0) {
auto str = arr2Str(labels_ref, 25, "labels_ref", handle.getStream());
CUML_LOG_DEBUG("y: %s", str.c_str());
str = arr2Str(labels, 25, "labels", handle.getStream());
CUML_LOG_DEBUG("y_hat: %s", str.c_str());
CUML_LOG_DEBUG("Score = %lf", score);
}
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(labels_ref));
}
protected:
DbscanInputs<T, IdxT> params;
IdxT *labels, *labels_ref;
double score;
};
const std::vector<DbscanInputs<float, int>> inputsf2 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{50000, 16, 5l, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};
const std::vector<DbscanInputs<float, int64_t>> inputsf3 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{50000, 16, 5l, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};
const std::vector<DbscanInputs<double, int>> inputsd2 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{100, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};
const std::vector<DbscanInputs<double, int64_t>> inputsd3 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{100, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};
typedef DbscanTest<float, int> DbscanTestF_Int;
TEST_P(DbscanTestF_Int, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<float, int64_t> DbscanTestF_Int64;
TEST_P(DbscanTestF_Int64, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<double, int> DbscanTestD_Int;
TEST_P(DbscanTestD_Int, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<double, int64_t> DbscanTestD_Int64;
TEST_P(DbscanTestD_Int64, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int64,
::testing::ValuesIn(inputsf3));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int,
::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int64,
::testing::ValuesIn(inputsd3));
template <typename T>
struct DBScan2DArrayInputs {
const T *points;
const int *out;
size_t n_row;
  // n_out allows comparing fewer labels than we have inputs
// (some output labels can be ambiguous)
size_t n_out;
T eps;
int min_pts;
};
template <typename T>
class Dbscan2DSimple : public ::testing::TestWithParam<DBScan2DArrayInputs<T>> {
protected:
void basicTest() {
cumlHandle handle;
params = ::testing::TestWithParam<DBScan2DArrayInputs<T>>::GetParam();
allocate(inputs, params.n_row * 2);
allocate(labels, params.n_row);
allocate(labels_ref, params.n_out);
MLCommon::copy(inputs, params.points, params.n_row * 2, handle.getStream());
MLCommon::copy(labels_ref, params.out, params.n_out, handle.getStream());
CUDA_CHECK(cudaStreamSynchronize(handle.getStream()));
dbscanFit(handle, inputs, (int)params.n_row, 2, params.eps, params.min_pts,
labels);
CUDA_CHECK(cudaStreamSynchronize(handle.getStream()));
score = adjustedRandIndex(handle, labels_ref, labels, (int)params.n_out);
if (score < 1.0) {
auto str =
arr2Str(labels_ref, params.n_out, "labels_ref", handle.getStream());
CUML_LOG_DEBUG("y: %s", str.c_str());
str = arr2Str(labels, params.n_row, "labels", handle.getStream());
CUML_LOG_DEBUG("y_hat: %s", str.c_str());
CUML_LOG_DEBUG("Score = %lf", score);
}
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(cudaFree(labels_ref));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(inputs));
}
protected:
DBScan2DArrayInputs<T> params;
int *labels, *labels_ref;
T *inputs;
double score;
};
// The input looks like a latin cross or a star with a chain:
// .
// . . . . .
// .
// There is 1 core-point (intersection of the bars)
// and the two points at the far right are not reachable from it
// So there should be one cluster (the plus/star on the left)
// and two noise points
const std::vector<float> test2d1_f = {0, 0, 1, 0, 1, 1, 1,
-1, 2, 0, 3, 0, 4, 0};
const std::vector<double> test2d1_d(test2d1_f.begin(), test2d1_f.end());
const std::vector<int> test2d1_l = {0, 0, 0, 0, 0, -1, -1};
// The input looks like a long two-barred (orthodox) cross or
// two stars next to each other:
// . .
// . . . . . .
// . .
// There are 2 core-points but they are not reachable from each other
// So there should be two clusters, both in the form of a plus/star
const std::vector<float> test2d2_f = {0, 0, 1, 0, 1, 1, 1, -1, 2, 0,
3, 0, 4, 0, 4, 1, 4, -1, 5, 0};
const std::vector<double> test2d2_d(test2d2_f.begin(), test2d2_f.end());
const std::vector<int> test2d2_l = {0, 0, 0, 0, 0, 1, 1, 1, 1, 1};
// The input looks like a two-barred (orthodox) cross or
// two stars sharing a link:
// . .
// . . . . .
// . .
// There are 2 core-points but they are not reachable from each other
// So there should be two clusters.
// However, the link that is shared between the stars
// actually has an ambiguous label (to the best of my knowledge)
// as it will depend on the order in which we process the core-points.
// Note that there are 9 input points, but only 8 labels for this reason
const std::vector<float> test2d3_f = {
0, 0, 1, 0, 1, 1, 1, -1, 3, 0, 3, 1, 3, -1, 4, 0, 2, 0,
};
const std::vector<double> test2d3_d(test2d3_f.begin(), test2d3_f.end());
const std::vector<int> test2d3_l = {0, 0, 0, 0, 1, 1, 1, 1};
const std::vector<DBScan2DArrayInputs<float>> inputs2d_f = {
{test2d1_f.data(), test2d1_l.data(), test2d1_f.size() / 2, test2d1_l.size(),
1.1f, 4},
{test2d2_f.data(), test2d2_l.data(), test2d2_f.size() / 2, test2d2_l.size(),
1.1f, 4},
{test2d3_f.data(), test2d3_l.data(), test2d3_f.size() / 2, test2d3_l.size(),
1.1f, 4},
};
const std::vector<DBScan2DArrayInputs<double>> inputs2d_d = {
{test2d1_d.data(), test2d1_l.data(), test2d1_d.size() / 2, test2d1_l.size(),
1.1, 4},
{test2d2_d.data(), test2d2_l.data(), test2d2_d.size() / 2, test2d2_l.size(),
1.1, 4},
{test2d3_d.data(), test2d3_l.data(), test2d3_d.size() / 2, test2d3_l.size(),
1.1, 4},
};
typedef Dbscan2DSimple<float> Dbscan2DSimple_F;
TEST_P(Dbscan2DSimple_F, Result) { ASSERT_TRUE(score == 1.0); }
typedef Dbscan2DSimple<double> Dbscan2DSimple_D;
TEST_P(Dbscan2DSimple_D, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(DbscanTests, Dbscan2DSimple_F,
::testing::ValuesIn(inputs2d_f));
INSTANTIATE_TEST_CASE_P(DbscanTests, Dbscan2DSimple_D,
::testing::ValuesIn(inputs2d_d));
} // end namespace ML
|
7ef6c48b9b2273397d8fb77f5135e16544f63d90.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <hip/hip_fp16.h>
#include "cufft_benchmark.h"
#define NX (1LL<<23)
#define BATCH 10
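// Note added for clarity (not in the original benchmark): with NX = 2^23 and
// BATCH = 10, the half2 buffer allocated below occupies
// sizeof(half2) * NX * BATCH = 4 * 8388608 * 10 bytes, i.e. 320 MiB.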
half2 *data;
int main()
{
CHECK(hipMalloc((void**)&data, sizeof(half2)*NX*BATCH));
printf("size: %d\n", sizeof(half2)*NX*BATCH);
long long sample_size = NX;
int block_size = BATCH;
size_t workSize = 0;
hipfftHandle plan_inverse;
hipfftCreate(&plan_inverse);
printf("cufftXt plan result: %d\n", cufftXtMakePlanMany(plan_inverse,
1, &sample_size,
NULL, 1, 1, HIP_C_16F,
NULL, 1, 1, HIP_C_16F,
block_size, &workSize, HIP_C_16F));
start_time();
for(int i=0;i<100;i++)
printf("cufftXt execute result: %d\n", cufftXtExec(plan_inverse, data, data, HIPFFT_FORWARD));
hipDeviceSynchronize();
end_time("half16 cufft time cost: ");
hipfftDestroy(plan_inverse);
hipFree(data);
return 0;
}
| 7ef6c48b9b2273397d8fb77f5135e16544f63d90.cu | #include <stdio.h>
#include <cufft.h>
#include <cufftXt.h>
#include <cuda_fp16.h>
#include "cufft_benchmark.h"
#define NX (1LL<<23)
#define BATCH 10
half2 *data;
int main()
{
CHECK(cudaMalloc((void**)&data, sizeof(half2)*NX*BATCH));
printf("size: %d\n", sizeof(half2)*NX*BATCH);
long long sample_size = NX;
int block_size = BATCH;
size_t workSize = 0;
cufftHandle plan_inverse;
cufftCreate(&plan_inverse);
printf("cufftXt plan result: %d\n", cufftXtMakePlanMany(plan_inverse,
1, &sample_size,
NULL, 1, 1, CUDA_C_16F,
NULL, 1, 1, CUDA_C_16F,
block_size, &workSize, CUDA_C_16F));
start_time();
for(int i=0;i<100;i++)
printf("cufftXt execute result: %d\n", cufftXtExec(plan_inverse, data, data, CUFFT_FORWARD));
cudaDeviceSynchronize();
end_time("half16 cufft time cost: ");
cufftDestroy(plan_inverse);
cudaFree(data);
return 0;
}
|
fac79c3ec2b3b0d445f5b80b90b4907a0863b3f0.hip | // !!! This is a file automatically generated by hipify!!!
/*****************************************************************
* Utils for Cuda code
* 1 CUDA ERROR CHECKING CODE
*
* Adapted from:
* https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
* https://stackoverflow.com/questions/22399794/qr-decomposition-to-solve-linear-systems-in-cuda
*
* error checking usage for library functions:
* gpuErrchk(hipMalloc((void**)&a_d, size*sizeof(int)));
* cusolveErrchk(<cusolvefunctioncall>)
* cublasErrchk(<cubalsfunctioncall>)
* error checking usage for custom kernels:
* kernel<<<1,1>>>(a);
* gpuErrchk(hipPeekAtLastError());
* gpuErrchk(hipDeviceSynchronize());
*
* 2 Matrix printing code
*
* 3 Templated External Memory Wrapper
*****************************************************************/
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cusolverDn.h>
#include <cusolverSp.h>
/*** 1 CUDA ERROR CHECKING CODE 1 ***/
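// Note added for clarity (not part of the original file): the gpuErrchk /
// cublasErrchk / cusolveErrchk wrappers shown in the usage comment above are
// assumed to be macros defined in a companion header, roughly of the form
//   #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// so that the assert helpers below receive the call site's file name and line.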
__host__
void gpuAssert(hipError_t code, const char *file, const int line, bool abort=true){
if (code != hipSuccess){
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort){hipDeviceReset(); exit(code);}
}
}
__host__ __device__
static const char *cublasGetErrorEnum(hipblasStatus_t error){
switch (error){
case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR";
case HIPBLAS_STATUS_NOT_SUPPORTED: return "HIPBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR";
default: return "<unknown>";
}
}
__host__ __device__
void cublasAssert(hipblasStatus_t err, const char *file, const int line){
if(HIPBLAS_STATUS_SUCCESS != err) {
#ifdef __CUDA_ARCH__
printf("CUBLAS error in file '%s', line %d\n error %d: %s\n terminating!\n",file,line,err,cublasGetErrorEnum(err));
#else
fprintf(stderr, "CUBLAS error in file '%s', line %d\n error %d: %s\n terminating!\n",file,line,err,cublasGetErrorEnum(err));
hipDeviceReset();
#endif
assert(0);
}
}
__host__
static const char *cusolverGetErrorEnum(cusolverStatus_t error){
switch (error){
case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR: return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
default: return "<unknown>";
}
}
__host__ void cusolverAssert(cusolverStatus_t err, const char *file, const int line){
if(CUSOLVER_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSOLVE error in file '%s', line %d\n error %d: %s\n terminating!\n",file,line,err,cusolverGetErrorEnum(err));
hipDeviceReset(); assert(0);
}
}
__host__
static const char *cusparseGetErrorEnum(hipsparseStatus_t error){
switch (error){
case HIPSPARSE_STATUS_SUCCESS: return "CUSPARSE_SUCCESS";
case HIPSPARSE_STATUS_NOT_INITIALIZED: return "HIPSPARSE_STATUS_NOT_INITIALIZED";
case HIPSPARSE_STATUS_ALLOC_FAILED: return "HIPSPARSE_STATUS_ALLOC_FAILED";
case HIPSPARSE_STATUS_INVALID_VALUE: return "HIPSPARSE_STATUS_INVALID_VALUE";
case HIPSPARSE_STATUS_ARCH_MISMATCH: return "HIPSPARSE_STATUS_ARCH_MISMATCH";
case HIPSPARSE_STATUS_EXECUTION_FAILED: return "HIPSPARSE_STATUS_EXECUTION_FAILED";
case HIPSPARSE_STATUS_INTERNAL_ERROR: return "HIPSPARSE_STATUS_INTERNAL_ERROR";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
default: return "<unknown>";
}
}
__host__ void cusparseAssert(hipsparseStatus_t err, const char *file, const int line){
if(HIPSPARSE_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSPARSE error in file '%s', line %d\n error %d: %s\n terminating!\n",file,line,err,cusparseGetErrorEnum(err));
hipDeviceReset(); assert(0);
}
}
/*** 1 CUDA ERROR CHECKING CODE 1 ***/ | fac79c3ec2b3b0d445f5b80b90b4907a0863b3f0.cu | /*****************************************************************
* Utils for Cuda code
* 1 CUDA ERROR CHECKING CODE
*
* Adapted from:
* https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
* https://stackoverflow.com/questions/22399794/qr-decomposition-to-solve-linear-systems-in-cuda
*
* error checking usage for library functions:
* gpuErrchk(cudaMalloc((void**)&a_d, size*sizeof(int)));
* cusolveErrchk(<cusolvefunctioncall>)
* cublasErrchk(<cubalsfunctioncall>)
* error checking usage for custom kernels:
* kernel<<<1,1>>>(a);
* gpuErrchk(cudaPeekAtLastError());
* gpuErrchk(cudaDeviceSynchronize());
*
* 2 Matrix printing code
*
* 3 Templated External Memory Wrapper
*****************************************************************/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include <cusolverSp.h>
/*** 1 CUDA ERROR CHECKING CODE 1 ***/
__host__
void gpuAssert(cudaError_t code, const char *file, const int line, bool abort=true){
if (code != cudaSuccess){
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort){cudaDeviceReset(); exit(code);}
}
}
__host__ __device__
static const char *cublasGetErrorEnum(cublasStatus_t error){
switch (error){
case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR";
default: return "<unknown>";
}
}
__host__ __device__
void cublasAssert(cublasStatus_t err, const char *file, const int line){
if(CUBLAS_STATUS_SUCCESS != err) {
#ifdef __CUDA_ARCH__
printf("CUBLAS error in file '%s', line %d\n error %d: %s\n terminating!\n",file,line,err,cublasGetErrorEnum(err));
#else
fprintf(stderr, "CUBLAS error in file '%s', line %d\n error %d: %s\n terminating!\n",file,line,err,cublasGetErrorEnum(err));
cudaDeviceReset();
#endif
assert(0);
}
}
__host__
static const char *cusolverGetErrorEnum(cusolverStatus_t error){
switch (error){
case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR: return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
default: return "<unknown>";
}
}
__host__ void cusolverAssert(cusolverStatus_t err, const char *file, const int line){
if(CUSOLVER_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSOLVE error in file '%s', line %d\n error %d: %s\n terminating!\n",file,line,err,cusolverGetErrorEnum(err));
cudaDeviceReset(); assert(0);
}
}
__host__
static const char *cusparseGetErrorEnum(cusparseStatus_t error){
switch (error){
case CUSPARSE_STATUS_SUCCESS: return "CUSPARSE_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED: return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_EXECUTION_FAILED: return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
default: return "<unknown>";
}
}
__host__ void cusparseAssert(cusparseStatus_t err, const char *file, const int line){
if(CUSPARSE_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSPARSE error in file '%s', line %d\n error %d: %s\n terminating!\n",file,line,err,cusparseGetErrorEnum(err));
cudaDeviceReset(); assert(0);
}
}
/*** 1 CUDA ERROR CHECKING CODE 1 ***/ |
74f1e9c356479d52b03fd5288a1e0c2aa7d5c379.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/mp_helper.h>
#include <spconv/reordering.h>
#include <spconv/reordering.cu.h>
#include <tensorview/helper_kernel.cu.h>
#include <tensorview/helper_launch.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
namespace functor {
template <typename T, typename Index>
struct SparseGatherFunctor<tv::GPU, T, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
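  // Note added for clarity (not in the original source): int2/int4 are only
  // used as wide load/store types here -- sizeof(int2)/sizeof(at::Half) and
  // sizeof(int4)/sizeof(float) are both 4, so half and float move 4 elements
  // per vectorized access (double, which also selects int4, moves 2).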
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::GPU &d, tv::TensorView<T> buffer,
tv::TensorView<const T> features,
tv::TensorView<const Index> indices, int size) {
if (size <= 0)
return;
int numPlanes = features.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
mp_for_each<kernel_block_t>([=, &buffer, &features, &indices,
                                 &notFound](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( gatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)
, dim3(dim3(numPlanes / NumTLP, size / NumTLP)),
dim3(dim3(NumTLP / vecloadFactor, NumTLP / NumILP)), 0,
d.getStream(), buffer.data(), features.data(), indices.data(),
nHotBlock, numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)
, dim3(dim3(1, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
d.getStream(), buffer.data() + nHotBlock * numPlanes,
features.data(), indices.data() + nHotBlock,
size - nHotBlock, numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( gatherGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::launch::DivUp(size, NumTLP),
tv::launch::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
buffer.data(), features.data(), indices.data(), size, numPlanes);
TV_CHECK_CUDA_ERR();
}
}
};
template <typename T, typename Index>
struct SparseScatterAddFunctor<tv::GPU, T, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::GPU &d, tv::TensorView<T> outFeatures,
tv::TensorView<const T> buffer,
tv::TensorView<const Index> indices, int size, bool stable) {
if (size <= 0)
return;
int numPlanes = outFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor =
sizeof(vecload_type_t) / sizeof(T); // important for half.
mp_for_each<kernel_block_t>([=, &d, &outFeatures, &buffer, &indices,
                                 &notFound](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(numPlanes / NumTLP, size / NumTLP)),
dim3(dim3(NumTLP / vecloadFactor, NumTLP / NumILP)), 0,
d.getStream(), outFeatures.data(), buffer.data(),
indices.data(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, d.getStream(),
outFeatures.data(), buffer.data() + nHotBlock * numPlanes,
indices.data() + nHotBlock, size - nHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::launch::DivUp(size, NumTLP),
tv::launch::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), buffer.data(), indices.data(), size,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_T_INDEX(T, Index) \
template struct functor::SparseGatherFunctor<tv::GPU, T, Index>; \
template struct functor::SparseScatterAddFunctor<tv::GPU, T, Index>;
#define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPECS_T_INDEX(T, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
} // namespace spconv | 74f1e9c356479d52b03fd5288a1e0c2aa7d5c379.cu | // Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/mp_helper.h>
#include <spconv/reordering.h>
#include <spconv/reordering.cu.h>
#include <tensorview/helper_kernel.cu.h>
#include <tensorview/helper_launch.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
namespace functor {
template <typename T, typename Index>
struct SparseGatherFunctor<tv::GPU, T, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::GPU &d, tv::TensorView<T> buffer,
tv::TensorView<const T> features,
tv::TensorView<const Index> indices, int size) {
if (size <= 0)
return;
int numPlanes = features.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
mp_for_each<kernel_block_t>([=, &buffer, &features, &indices,
                                 &notFound](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
gatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>
<<<dim3(numPlanes / NumTLP, size / NumTLP),
dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
d.getStream()>>>(buffer.data(), features.data(), indices.data(),
nHotBlock, numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>
<<<dim3(1, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
d.getStream()>>>(buffer.data() + nHotBlock * numPlanes,
features.data(), indices.data() + nHotBlock,
size - nHotBlock, numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
gatherGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::launch::DivUp(size, NumTLP),
tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
buffer.data(), features.data(), indices.data(), size, numPlanes);
TV_CHECK_CUDA_ERR();
}
}
};
template <typename T, typename Index>
struct SparseScatterAddFunctor<tv::GPU, T, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::GPU &d, tv::TensorView<T> outFeatures,
tv::TensorView<const T> buffer,
tv::TensorView<const Index> indices, int size, bool stable) {
if (size <= 0)
return;
int numPlanes = outFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor =
sizeof(vecload_type_t) / sizeof(T); // important for half.
mp_for_each<kernel_block_t>([=, &d, &outFeatures, &buffer, &indices,
                                 &notFound](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(numPlanes / NumTLP, size / NumTLP),
dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
d.getStream()>>>(outFeatures.data(), buffer.data(),
indices.data(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, d.getStream()>>>(
outFeatures.data(), buffer.data() + nHotBlock * numPlanes,
indices.data() + nHotBlock, size - nHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
scatterAddGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::launch::DivUp(size, NumTLP),
tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), buffer.data(), indices.data(), size,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_T_INDEX(T, Index) \
template struct functor::SparseGatherFunctor<tv::GPU, T, Index>; \
template struct functor::SparseScatterAddFunctor<tv::GPU, T, Index>;
#define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPECS_T_INDEX(T, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
} // namespace spconv |
5ba8ab1e5e4d8ca8dd8a35bd40850a9c5e36cfdd.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/PointwiseOps.h>
#include <THH/THHNumerics.cuh>
namespace at { namespace native {
void addcmul_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "addcmul_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * b * c;
});
});
}
void addcdiv_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "addcdiv_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * (b / c);
});
});
}
void smooth_l1_backward_cuda_kernel(TensorIterator& iter, Scalar norm) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_backward_cuda", [&]() {
auto norm_val = norm.to<scalar_t>();
gpu_kernel(iter, [norm_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t {
const auto x = input - target;
if (x < scalar_t(-1))
return -norm_val * grad_output;
else if (x > scalar_t(1))
return norm_val * grad_output;
else
return norm_val * x * grad_output;
});
});
}
void mse_backward_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_backward_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return alpha * (a - b) * c;
});
});
}
REGISTER_DISPATCH(addcdiv_stub, &addcdiv_cuda_kernel);
REGISTER_DISPATCH(addcmul_stub, &addcmul_cuda_kernel);
REGISTER_DISPATCH(smooth_l1_backward_stub, &smooth_l1_backward_cuda_kernel);
REGISTER_DISPATCH(mse_backward_stub, &mse_backward_cuda_kernel);
}} // namespace at::native
| 5ba8ab1e5e4d8ca8dd8a35bd40850a9c5e36cfdd.cu | #include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/PointwiseOps.h>
#include <THC/THCNumerics.cuh>
namespace at { namespace native {
void addcmul_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "addcmul_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * b * c;
});
});
}
void addcdiv_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "addcdiv_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * (b / c);
});
});
}
void smooth_l1_backward_cuda_kernel(TensorIterator& iter, Scalar norm) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_backward_cuda", [&]() {
auto norm_val = norm.to<scalar_t>();
gpu_kernel(iter, [norm_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t {
const auto x = input - target;
if (x < scalar_t(-1))
return -norm_val * grad_output;
else if (x > scalar_t(1))
return norm_val * grad_output;
else
return norm_val * x * grad_output;
});
});
}
void mse_backward_cuda_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_backward_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return alpha * (a - b) * c;
});
});
}
REGISTER_DISPATCH(addcdiv_stub, &addcdiv_cuda_kernel);
REGISTER_DISPATCH(addcmul_stub, &addcmul_cuda_kernel);
REGISTER_DISPATCH(smooth_l1_backward_stub, &smooth_l1_backward_cuda_kernel);
REGISTER_DISPATCH(mse_backward_stub, &mse_backward_cuda_kernel);
}} // namespace at::native
|
94a2ef5e045d0511f35e3f33d60a9347e8f365a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA
//-*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of reduction with KMPC interface.
//
//===----------------------------------------------------------------------===//
#include <complex.h>
#include <stdio.h>
#include "omptarget-nvptx.h"
// may eventually remove this
EXTERN
int32_t __gpu_block_reduce() {
int tid = GetLogicalThreadIdInBlock();
int nt = GetNumberOfOmpThreads(tid, isSPMDMode(), isRuntimeUninitialized());
if (nt != blockDim.x)
return 0;
unsigned tnum = __ACTIVEMASK();
if (tnum != (~0x0)) // assume swapSize is 32
return 0;
return 1;
}
EXTERN
int32_t __kmpc_reduce_gpu(kmp_Ident *loc, int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
void *reduce_array_size, kmp_ReductFctPtr *reductFct,
kmp_CriticalName *lck) {
int threadId = GetLogicalThreadIdInBlock();
omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId);
int numthread;
if (currTaskDescr->IsParallelConstruct()) {
numthread =
GetNumberOfOmpThreads(threadId, checkSPMDMode(loc),
checkRuntimeUninitialized(loc));
} else {
numthread = GetNumberOfOmpTeams();
}
if (numthread == 1)
return 1;
if (!__gpu_block_reduce())
return 2;
if (threadIdx.x == 0)
return 1;
return 0;
}
EXTERN
int32_t __kmpc_reduce_combined(kmp_Ident *loc) {
return threadIdx.x == 0 ? 2 : 0;
}
EXTERN
int32_t __kmpc_reduce_simd(kmp_Ident *loc) {
return (threadIdx.x % 32 == 0) ? 1 : 0;
}
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) {
return __SHFL_DOWN_SYNC(0xFFFFFFFF, val, delta, size);
}
EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) {
return __SHFL_DOWN_SYNC(0xFFFFFFFFFFFFFFFFL, val, delta, size);
}
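// Tree-reduce a full warp: halve the shuffle offset each step until lane 0
// holds the combined value.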
static INLINE void gpu_regular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct) {
for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
shflFct(reduce_data, /*LaneId - not used= */ 0,
/*Offset = */ mask, /*AlgoVersion=*/0);
}
}
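// Reduce `size` contiguous active lanes (a partial warp): each step combines
// lane tid with lane tid + mask and roughly halves the number of values left
// to combine.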
static INLINE void gpu_irregular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
uint32_t size, uint32_t tid) {
uint32_t curr_size;
uint32_t mask;
curr_size = size;
mask = curr_size / 2;
while (mask > 0) {
shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
curr_size = (curr_size + 1) / 2;
mask = curr_size / 2;
}
}
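// Reduce across dispersed active lanes: each step pairs a lane with the next
// higher active lane (found via lanemask_gt/__ffs); returns 1 on the lowest
// active lane, which ends up holding the reduced value.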
static INLINE uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
uint32_t lanemask_lt;
uint32_t lanemask_gt;
uint32_t size, remote_id, physical_lane_id;
physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask_lt));
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
uint32_t logical_lane_id = __popc(Liveness & lanemask_lt) * 2;
asm("mov.u32 %0, %%lanemask_gt;" : "=r"(lanemask_gt));
do {
Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
remote_id = __ffs(Liveness & lanemask_gt);
size = __popc(Liveness);
logical_lane_id /= 2;
shflFct(reduce_data, /*LaneId =*/logical_lane_id,
/*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
} while (logical_lane_id % 2 == 0 && size > 1);
return (logical_lane_id == 0);
}
EXTERN
int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
if (Liveness == 0xffffffff) {
gpu_regular_warp_reduce(reduce_data, shflFct);
return GetThreadIdInBlock() % WARPSIZE ==
0; // Result on lane 0 of the simd warp.
} else {
return gpu_irregular_simd_reduce(
reduce_data, shflFct); // Result on the first active lane.
}
}
INLINE
int32_t nvptx_parallel_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
bool isSPMDExecutionMode,
bool isRuntimeUninitialized) {
uint32_t BlockThreadId = GetLogicalThreadIdInBlock();
uint32_t NumThreads = GetNumberOfOmpThreads(
BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized);
if (NumThreads == 1)
return 1;
/*
* This reduce function handles reduction within a team. It handles
* parallel regions in both L1 and L2 parallelism levels. It also
* supports Generic, SPMD, and NoOMP modes.
*
* 1. Reduce within a warp.
* 2. Warp master copies value to warp 0 via shared memory.
* 3. Warp 0 reduces to a single value.
* 4. The reduced value is available in the thread that returns 1.
*/
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = BlockThreadId / WARPSIZE;
// Volta execution model:
  // For the Generic execution mode a parallel region either has 1 thread or,
// beyond that, always a multiple of 32. For the SPMD execution mode we may
// have any number of threads.
if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/NumThreads % WARPSIZE,
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
}
return BlockThreadId == 0;
#else
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
if (Liveness == 0xffffffff) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__popc(Liveness),
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
// parallel region may enter here; return
// early.
return gpu_irregular_simd_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = BlockThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
} else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
return BlockThreadId == 0;
}
// Get the OMP thread Id. This is different from BlockThreadId in the case of
// an L2 parallel region.
return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/isSPMDMode(),
/*isRuntimeUninitialized=*/isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/true,
/*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/false,
/*isRuntimeUninitialized=*/true);
}
INLINE
int32_t nvptx_teams_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct,
bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
uint32_t ThreadId = GetLogicalThreadIdInBlock();
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
isSPMDExecutionMode
? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true,
isRuntimeUninitialized)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ volatile bool IsLastTeam;
// Team masters of all teams write to the scratchpad.
if (ThreadId == 0) {
unsigned int *timestamp = GetTeamsReductionTimestamp();
char *scratchpad = GetTeamsReductionScratchpad();
scratchFct(reduce_data, scratchpad, TeamId, NumTeams);
__threadfence();
// atomicInc increments 'timestamp' and has a range [0, NumTeams-1].
// It resets 'timestamp' back to 0 once the last team increments
// this counter.
unsigned val = atomicInc(timestamp, NumTeams - 1);
IsLastTeam = val == NumTeams - 1;
}
// We have to wait on L1 barrier because in GENERIC mode the workers
// are waiting on barrier 0 for work.
//
// If we guard this barrier as follows it leads to deadlock, probably
// because of a compiler bug: if (!IsGenericMode()) __syncthreads();
uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE;
named_sync(L1_BARRIER, SyncWarps * WARPSIZE);
// If this team is not the last, quit.
if (/* Volatile read by all threads */ !IsLastTeam)
return 0;
//
// Last team processing.
//
// Threads in excess of #teams do not participate in reduction of the
// scratchpad values.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t ActiveThreads = NumThreads;
if (NumTeams < NumThreads) {
ActiveThreads =
(NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1);
}
if (ThreadId >= ActiveThreads)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = ThreadId / WARPSIZE;
// Reduce across warps to the warp master.
if ((ActiveThreads % WARPSIZE == 0) ||
(WarpId < WarpsNeeded - 1)) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (ActiveThreads > 1) // Partial warp but contiguous lanes
// Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/ActiveThreads % WARPSIZE,
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
if (ActiveThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#else
if (ThreadId >= NumTeams)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
// Reduce across warps to the warp master.
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
if (Liveness == 0xffffffff) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__popc(Liveness),
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads;
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#endif // __CUDA_ARCH__ >= 700
return ThreadId == 0;
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
scratchFct, ldFct, /*isSPMDExecutionMode=*/isSPMDMode(),
/*isRuntimeUninitialized=*/isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct,
/*isSPMDExecutionMode=*/true,
/*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct,
/*isSPMDExecutionMode=*/false,
/*isRuntimeUninitialized=*/true);
}
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc,
int32_t global_tid,
kmp_CriticalName *crit) {
if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0)
return 0;
// The master thread of the team actually does the reduction.
while (atomicCAS((uint32_t *)crit, 0, 1))
;
return 1;
}
EXTERN void
__kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid,
kmp_CriticalName *crit) {
__threadfence_system();
(void)atomicExch((uint32_t *)crit, 0);
}
| 94a2ef5e045d0511f35e3f33d60a9347e8f365a3.cu | //===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA
//-*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of reduction with KMPC interface.
//
//===----------------------------------------------------------------------===//
#include <complex.h>
#include <stdio.h>
#include "omptarget-nvptx.h"
// may eventually remove this
EXTERN
int32_t __gpu_block_reduce() {
int tid = GetLogicalThreadIdInBlock();
int nt = GetNumberOfOmpThreads(tid, isSPMDMode(), isRuntimeUninitialized());
if (nt != blockDim.x)
return 0;
unsigned tnum = __ACTIVEMASK();
if (tnum != (~0x0)) // assume swapSize is 32
return 0;
return 1;
}
EXTERN
int32_t __kmpc_reduce_gpu(kmp_Ident *loc, int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
void *reduce_array_size, kmp_ReductFctPtr *reductFct,
kmp_CriticalName *lck) {
int threadId = GetLogicalThreadIdInBlock();
omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId);
int numthread;
if (currTaskDescr->IsParallelConstruct()) {
numthread =
GetNumberOfOmpThreads(threadId, checkSPMDMode(loc),
checkRuntimeUninitialized(loc));
} else {
numthread = GetNumberOfOmpTeams();
}
if (numthread == 1)
return 1;
if (!__gpu_block_reduce())
return 2;
if (threadIdx.x == 0)
return 1;
return 0;
}
EXTERN
int32_t __kmpc_reduce_combined(kmp_Ident *loc) {
return threadIdx.x == 0 ? 2 : 0;
}
EXTERN
int32_t __kmpc_reduce_simd(kmp_Ident *loc) {
return (threadIdx.x % 32 == 0) ? 1 : 0;
}
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) {
return __SHFL_DOWN_SYNC(0xFFFFFFFF, val, delta, size);
}
EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) {
return __SHFL_DOWN_SYNC(0xFFFFFFFFFFFFFFFFL, val, delta, size);
}
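// Tree-reduce a full warp: halve the shuffle offset each step until lane 0
// holds the combined value.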
static INLINE void gpu_regular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct) {
for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
shflFct(reduce_data, /*LaneId - not used= */ 0,
/*Offset = */ mask, /*AlgoVersion=*/0);
}
}
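// Reduce `size` contiguous active lanes (a partial warp): each step combines
// lane tid with lane tid + mask and roughly halves the number of values left
// to combine.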
static INLINE void gpu_irregular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
uint32_t size, uint32_t tid) {
uint32_t curr_size;
uint32_t mask;
curr_size = size;
mask = curr_size / 2;
while (mask > 0) {
shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
curr_size = (curr_size + 1) / 2;
mask = curr_size / 2;
}
}
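// Reduce across dispersed active lanes: each step pairs a lane with the next
// higher active lane (found via lanemask_gt/__ffs); returns 1 on the lowest
// active lane, which ends up holding the reduced value.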
static INLINE uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
uint32_t lanemask_lt;
uint32_t lanemask_gt;
uint32_t size, remote_id, physical_lane_id;
physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask_lt));
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
uint32_t logical_lane_id = __popc(Liveness & lanemask_lt) * 2;
asm("mov.u32 %0, %%lanemask_gt;" : "=r"(lanemask_gt));
do {
Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
remote_id = __ffs(Liveness & lanemask_gt);
size = __popc(Liveness);
logical_lane_id /= 2;
shflFct(reduce_data, /*LaneId =*/logical_lane_id,
/*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
} while (logical_lane_id % 2 == 0 && size > 1);
return (logical_lane_id == 0);
}
EXTERN
int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
if (Liveness == 0xffffffff) {
gpu_regular_warp_reduce(reduce_data, shflFct);
return GetThreadIdInBlock() % WARPSIZE ==
0; // Result on lane 0 of the simd warp.
} else {
return gpu_irregular_simd_reduce(
reduce_data, shflFct); // Result on the first active lane.
}
}
INLINE
int32_t nvptx_parallel_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
bool isSPMDExecutionMode,
bool isRuntimeUninitialized) {
uint32_t BlockThreadId = GetLogicalThreadIdInBlock();
uint32_t NumThreads = GetNumberOfOmpThreads(
BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized);
if (NumThreads == 1)
return 1;
/*
* This reduce function handles reduction within a team. It handles
* parallel regions in both L1 and L2 parallelism levels. It also
* supports Generic, SPMD, and NoOMP modes.
*
* 1. Reduce within a warp.
* 2. Warp master copies value to warp 0 via shared memory.
* 3. Warp 0 reduces to a single value.
* 4. The reduced value is available in the thread that returns 1.
*/
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = BlockThreadId / WARPSIZE;
// Volta execution model:
  // For the Generic execution mode a parallel region either has 1 thread or,
// beyond that, always a multiple of 32. For the SPMD execution mode we may
// have any number of threads.
if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/NumThreads % WARPSIZE,
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
}
return BlockThreadId == 0;
#else
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
if (Liveness == 0xffffffff) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__popc(Liveness),
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
// parallel region may enter here; return
// early.
return gpu_irregular_simd_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = BlockThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
} else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
return BlockThreadId == 0;
}
// Get the OMP thread Id. This is different from BlockThreadId in the case of
// an L2 parallel region.
return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/isSPMDMode(),
/*isRuntimeUninitialized=*/isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/true,
/*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/false,
/*isRuntimeUninitialized=*/true);
}
INLINE
int32_t nvptx_teams_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct,
bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
uint32_t ThreadId = GetLogicalThreadIdInBlock();
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
isSPMDExecutionMode
? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true,
isRuntimeUninitialized)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ volatile bool IsLastTeam;
// Team masters of all teams write to the scratchpad.
if (ThreadId == 0) {
unsigned int *timestamp = GetTeamsReductionTimestamp();
char *scratchpad = GetTeamsReductionScratchpad();
scratchFct(reduce_data, scratchpad, TeamId, NumTeams);
__threadfence();
// atomicInc increments 'timestamp' and has a range [0, NumTeams-1].
// It resets 'timestamp' back to 0 once the last team increments
// this counter.
unsigned val = atomicInc(timestamp, NumTeams - 1);
IsLastTeam = val == NumTeams - 1;
}
// We have to wait on L1 barrier because in GENERIC mode the workers
// are waiting on barrier 0 for work.
//
// If we guard this barrier as follows it leads to deadlock, probably
// because of a compiler bug: if (!IsGenericMode()) __syncthreads();
uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE;
named_sync(L1_BARRIER, SyncWarps * WARPSIZE);
// If this team is not the last, quit.
if (/* Volatile read by all threads */ !IsLastTeam)
return 0;
//
// Last team processing.
//
// Threads in excess of #teams do not participate in reduction of the
// scratchpad values.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t ActiveThreads = NumThreads;
if (NumTeams < NumThreads) {
ActiveThreads =
(NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1);
}
if (ThreadId >= ActiveThreads)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = ThreadId / WARPSIZE;
// Reduce across warps to the warp master.
if ((ActiveThreads % WARPSIZE == 0) ||
(WarpId < WarpsNeeded - 1)) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (ActiveThreads > 1) // Partial warp but contiguous lanes
// Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/ActiveThreads % WARPSIZE,
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
if (ActiveThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#else
if (ThreadId >= NumTeams)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
// Reduce across warps to the warp master.
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
if (Liveness == 0xffffffff) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__popc(Liveness),
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads;
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#endif // __CUDA_ARCH__ >= 700
return ThreadId == 0;
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
scratchFct, ldFct, /*isSPMDExecutionMode=*/isSPMDMode(),
/*isRuntimeUninitialized=*/isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct,
/*isSPMDExecutionMode=*/true,
/*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct,
/*isSPMDExecutionMode=*/false,
/*isRuntimeUninitialized=*/true);
}
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc,
int32_t global_tid,
kmp_CriticalName *crit) {
if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0)
return 0;
// The master thread of the team actually does the reduction.
while (atomicCAS((uint32_t *)crit, 0, 1))
;
return 1;
}
EXTERN void
__kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid,
kmp_CriticalName *crit) {
__threadfence_system();
(void)atomicExch((uint32_t *)crit, 0);
}
|
519c70ad409c4625b0af38683336fb06458b016a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuml/tsa/batched_arima.hpp>
#include <cuml/tsa/batched_kalman.hpp>
#include <raft/cudart_utils.h>
#include <common/nvtx.hpp>
#include <cuml/common/device_buffer.hpp>
#include <linalg/batched/matrix.cuh>
#include <metrics/batched/information_criterion.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/handle.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <timeSeries/arima_helpers.cuh>
namespace ML {
void pack(raft::handle_t& handle,
const ARIMAParams<double>& params,
const ARIMAOrder& order,
int batch_size,
double* param_vec)
{
const auto stream = handle.get_stream();
params.pack(order, batch_size, param_vec, stream);
}
void unpack(raft::handle_t& handle,
ARIMAParams<double>& params,
const ARIMAOrder& order,
int batch_size,
const double* param_vec)
{
const auto stream = handle.get_stream();
params.unpack(order, batch_size, param_vec, stream);
}
void batched_diff(raft::handle_t& handle,
double* d_y_diff,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order)
{
const auto stream = handle.get_stream();
MLCommon::TimeSeries::prepare_data(
d_y_diff, d_y, batch_size, n_obs, order.d, order.D, order.s, stream);
}
void predict(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
int start,
int end,
const ARIMAOrder& order,
const ARIMAParams<double>& params,
double* d_y_p,
bool pre_diff,
double level,
double* d_lower,
double* d_upper)
{
ML::PUSH_RANGE(__func__);
auto allocator = handle.get_device_allocator();
const auto stream = handle.get_stream();
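  // Difference the series up front only when the model requires differencing,
  // pre-diff is requested and no prediction intervals are needed (level == 0);
  // the Kalman filter then runs on the differenced data with d = D = 0.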
bool diff = order.need_diff() && pre_diff && level == 0;
// Prepare data
int n_obs_kf;
const double* d_y_kf;
ARIMAOrder order_after_prep = order;
if (diff) {
n_obs_kf = n_obs - order.n_diff();
MLCommon::TimeSeries::prepare_data(
arima_mem.y_diff, d_y, batch_size, n_obs, order.d, order.D, order.s, stream);
order_after_prep.d = 0;
order_after_prep.D = 0;
d_y_kf = arima_mem.y_diff;
} else {
n_obs_kf = n_obs;
d_y_kf = d_y;
}
double* d_vs = arima_mem.vs;
// Create temporary array for the forecasts
int num_steps = ::max(end - n_obs, 0);
MLCommon::device_buffer<double> fc_buffer(allocator, stream, num_steps * batch_size);
double* d_y_fc = fc_buffer.data();
// Compute the residual and forecast
std::vector<double> loglike = std::vector<double>(batch_size);
/// TODO: use device loglike to avoid useless copy ; part of #2233
batched_loglike(handle,
arima_mem,
d_y_kf,
batch_size,
n_obs_kf,
order_after_prep,
params,
loglike.data(),
d_vs,
false,
true,
MLE,
0,
num_steps,
d_y_fc,
level,
d_lower,
d_upper);
auto counting = thrust::make_counting_iterator(0);
int predict_ld = end - start;
//
// In-sample prediction
//
int res_offset = diff ? order.d + order.s * order.D : 0;
int p_start = ::max(start, res_offset);
int p_end = ::min(n_obs, end);
// The prediction loop starts by filling undefined predictions with NaN,
// then computes the predictions from the observations and residuals
if (start < n_obs) {
thrust::for_each(
thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
d_y_p[0] = 0.0;
for (int i = 0; i < res_offset - start; i++) {
d_y_p[bid * predict_ld + i] = nan("");
}
for (int i = p_start; i < p_end; i++) {
d_y_p[bid * predict_ld + i - start] =
d_y[bid * n_obs + i] - d_vs[bid * n_obs_kf + i - res_offset];
}
});
}
//
// Finalize out-of-sample forecast and copy in-sample predictions
//
if (num_steps) {
if (diff) {
MLCommon::TimeSeries::finalize_forecast(
d_y_fc, d_y, num_steps, batch_size, n_obs, n_obs, order.d, order.D, order.s, stream);
}
// Copy forecast in d_y_p
thrust::for_each(
thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
for (int i = 0; i < num_steps; i++) {
d_y_p[bid * predict_ld + n_obs - start + i] = d_y_fc[num_steps * bid + i];
}
});
/// TODO: 2D copy kernel?
}
ML::POP_RANGE();
}
/**
* Kernel to compute the sum-of-squares log-likelihood estimation
*
* @param[in] d_y Series to fit
* @param[in] d_mu mu parameters
* @param[in] d_ar AR parameters
* @param[in] d_ma MA parameters
* @param[in] d_sar Seasonal AR parameters
* @param[in] d_sma Seasonal MA parameters
* @param[out] d_loglike Evaluated log-likelihood
* @param[in] n_obs Number of observations in a time series
* @param[in] n_phi Number of phi coefficients (combined AR-SAR)
* @param[in] n_theta Number of theta coefficients (combined MA-SMA)
* @param[in] p Number of AR parameters
* @param[in] q Number of MA parameters
* @param[in] P Number of seasonal AR parameters
* @param[in] Q Number of seasonal MA parameters
* @param[in] s Seasonal period or 0
* @param[in] k Whether to use an intercept
* @param[in] start_sum At which index to start the sum
* @param[in] start_y First used y index (observation)
* @param[in] start_v First used v index (residual)
*/
template <typename DataT>
__global__ void sum_of_squares_kernel(const DataT* d_y,
const DataT* d_mu,
const DataT* d_ar,
const DataT* d_ma,
const DataT* d_sar,
const DataT* d_sma,
DataT* d_loglike,
int n_obs,
int n_phi,
int n_theta,
int p,
int q,
int P,
int Q,
int s,
int k,
int start_sum,
int start_y,
int start_v)
{
// Load phi, theta and mu to registers
DataT phi, theta;
if (threadIdx.x < n_phi) {
phi = MLCommon::TimeSeries::reduced_polynomial<true>(
blockIdx.x, d_ar, p, d_sar, P, s, threadIdx.x + 1);
}
if (threadIdx.x < n_theta) {
theta = MLCommon::TimeSeries::reduced_polynomial<false>(
blockIdx.x, d_ma, q, d_sma, Q, s, threadIdx.x + 1);
}
DataT mu = k ? d_mu[blockIdx.x] : (DataT)0;
// Shared memory: load y and initialize the residuals
extern __shared__ DataT shared_mem[];
DataT* b_y = shared_mem;
DataT* b_vs = shared_mem + n_obs - start_y;
for (int i = threadIdx.x; i < n_obs - start_y; i += blockDim.x) {
b_y[i] = d_y[n_obs * blockIdx.x + i + start_y];
}
for (int i = threadIdx.x; i < start_sum - start_v; i += blockDim.x) {
b_vs[i] = (DataT)0;
}
// Main loop
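  // Each thread owns one lag: thread j accumulates -phi_{j+1} * y_{i-j-1} and
  // -theta_{j+1} * v_{i-j-1}; a block reduction combines the lags, then thread 0
  // adds y_i - mu to form the residual v_i and accumulates the sum of squares.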
char* temp_smem = (char*)(shared_mem + 2 * n_obs - start_y - start_v);
DataT res, ssq = 0;
for (int i = start_sum; i < n_obs; i++) {
__syncthreads();
res = (DataT)0;
res -= threadIdx.x < n_phi ? phi * b_y[i - threadIdx.x - 1 - start_y] : (DataT)0;
res -= threadIdx.x < n_theta ? theta * b_vs[i - threadIdx.x - 1 - start_v] : (DataT)0;
res = raft::blockReduce(res, temp_smem);
if (threadIdx.x == 0) {
res += b_y[i - start_y] - mu;
b_vs[i - start_v] = res;
ssq += res * res;
}
}
// Compute log-likelihood and write it to global memory
if (threadIdx.x == 0) {
d_loglike[blockIdx.x] =
-0.5 * static_cast<DataT>(n_obs) * raft::myLog(ssq / static_cast<DataT>(n_obs - start_sum));
}
}
/**
* Sum-of-squares estimation method
*
* @param[in] handle cuML handle
* @param[in] d_y Series to fit: shape = (n_obs, batch_size)
* @param[in] batch_size Number of time series
* @param[in] n_obs Number of observations in a time series
* @param[in] order ARIMA hyper-parameters
* @param[in] Tparams Transformed parameters
* @param[out] d_loglike Evaluated log-likelihood (device)
* @param[in] truncate Number of observations to skip in the sum
*/
void conditional_sum_of_squares(raft::handle_t& handle,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const ARIMAParams<double>& Tparams,
double* d_loglike,
int truncate)
{
ML::PUSH_RANGE(__func__);
auto stream = handle.get_stream();
int n_phi = order.n_phi();
int n_theta = order.n_theta();
int max_lags = ::max(n_phi, n_theta);
int start_sum = ::max(max_lags, truncate);
int start_y = start_sum - n_phi;
int start_v = start_sum - n_theta;
// Compute the sum-of-squares and the log-likelihood
int n_warps = ::max(raft::ceildiv<int>(max_lags, 32), 1);
size_t shared_mem_size = (2 * n_obs - start_y - start_v + n_warps) * sizeof(double);
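  // Shared memory holds the (n_obs - start_y) observations used, the
  // (n_obs - start_v) residuals, and n_warps doubles of block-reduction scratch.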
hipLaunchKernelGGL(( sum_of_squares_kernel), dim3(batch_size), dim3(32 * n_warps), shared_mem_size, stream, d_y,
Tparams.mu,
Tparams.ar,
Tparams.ma,
Tparams.sar,
Tparams.sma,
d_loglike,
n_obs,
n_phi,
n_theta,
order.p,
order.q,
order.P,
order.Q,
order.s,
order.k,
start_sum,
start_y,
start_v);
CUDA_CHECK(hipPeekAtLastError());
ML::POP_RANGE();
}
void batched_loglike(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const ARIMAParams<double>& params,
double* loglike,
double* d_vs,
bool trans,
bool host_loglike,
LoglikeMethod method,
int truncate,
int fc_steps,
double* d_fc,
double level,
double* d_lower,
double* d_upper)
{
ML::PUSH_RANGE(__func__);
auto allocator = handle.get_device_allocator();
auto stream = handle.get_stream();
ARIMAParams<double> Tparams = {arima_mem.Tparams_mu,
arima_mem.Tparams_ar,
arima_mem.Tparams_ma,
arima_mem.Tparams_sar,
arima_mem.Tparams_sma,
arima_mem.Tparams_sigma2};
ASSERT(method == MLE || fc_steps == 0, "Only MLE method is valid for forecasting");
/* Create log-likelihood device array if host pointer is provided */
double* d_loglike = host_loglike ? arima_mem.loglike : loglike;
if (trans) {
MLCommon::TimeSeries::batched_jones_transform(
order, batch_size, false, params, Tparams, allocator, stream);
Tparams.mu = params.mu;
} else {
// non-transformed case: just use original parameters
Tparams = params;
}
if (method == CSS) {
conditional_sum_of_squares(handle, d_y, batch_size, n_obs, order, Tparams, d_loglike, truncate);
} else {
batched_kalman_filter(handle,
arima_mem,
d_y,
n_obs,
Tparams,
order,
batch_size,
d_loglike,
d_vs,
fc_steps,
d_fc,
level,
d_lower,
d_upper);
}
if (host_loglike) {
    /* Transfer log-likelihood device -> host */
raft::update_host(loglike, d_loglike, batch_size, stream);
}
ML::POP_RANGE();
}
void batched_loglike(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const double* d_params,
double* loglike,
double* d_vs,
bool trans,
bool host_loglike,
LoglikeMethod method,
int truncate,
int fc_steps,
double* d_fc,
double level,
double* d_lower,
double* d_upper)
{
ML::PUSH_RANGE(__func__);
// unpack parameters
auto allocator = handle.get_device_allocator();
auto stream = handle.get_stream();
ARIMAParams<double> params = {arima_mem.params_mu,
arima_mem.params_ar,
arima_mem.params_ma,
arima_mem.params_sar,
arima_mem.params_sma,
arima_mem.params_sigma2};
params.unpack(order, batch_size, d_params, stream);
batched_loglike(handle,
arima_mem,
d_y,
batch_size,
n_obs,
order,
params,
loglike,
d_vs,
trans,
host_loglike,
method,
truncate,
fc_steps,
d_fc,
level,
d_lower,
d_upper);
ML::POP_RANGE();
}
void batched_loglike_grad(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const double* d_x,
double* d_grad,
double h,
bool trans,
LoglikeMethod method,
int truncate)
{
ML::PUSH_RANGE(__func__);
auto allocator = handle.get_device_allocator();
auto stream = handle.get_stream();
auto counting = thrust::make_counting_iterator(0);
int N = order.complexity();
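  // Forward-difference gradient: perturb each of the N parameters by h in turn
  // and evaluate (loglike(x + h * e_i) - loglike(x)) / h for every batch member.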
// Initialize the perturbed x vector
double* d_x_pert = arima_mem.x_pert;
raft::copy(d_x_pert, d_x, N * batch_size, stream);
double* d_vs = arima_mem.vs;
double* d_ll_base = arima_mem.loglike_base;
double* d_ll_pert = arima_mem.loglike_pert;
// Evaluate the log-likelihood with the given parameter vector
batched_loglike(handle,
arima_mem,
d_y,
batch_size,
n_obs,
order,
d_x,
d_ll_base,
d_vs,
trans,
false,
method,
truncate);
for (int i = 0; i < N; i++) {
// Add the perturbation to the i-th parameter
thrust::for_each(
thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
d_x_pert[N * bid + i] = d_x[N * bid + i] + h;
});
// Evaluate the log-likelihood with the positive perturbation
batched_loglike(handle,
arima_mem,
d_y,
batch_size,
n_obs,
order,
d_x_pert,
d_ll_pert,
d_vs,
trans,
false,
method,
truncate);
// First derivative with a first-order accuracy
thrust::for_each(
thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
d_grad[N * bid + i] = (d_ll_pert[bid] - d_ll_base[bid]) / h;
});
// Reset the i-th parameter
thrust::for_each(
thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
d_x_pert[N * bid + i] = d_x[N * bid + i];
});
}
ML::POP_RANGE();
}
void information_criterion(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const ARIMAParams<double>& params,
double* d_ic,
int ic_type)
{
ML::PUSH_RANGE(__func__);
auto allocator = handle.get_device_allocator();
auto stream = handle.get_stream();
double* d_vs = arima_mem.vs;
/* Compute log-likelihood in d_ic */
batched_loglike(
handle, arima_mem, d_y, batch_size, n_obs, order, params, d_ic, d_vs, false, false, MLE);
/* Compute information criterion from log-likelihood and base term */
MLCommon::Metrics::Batched::information_criterion(
d_ic,
d_ic,
static_cast<MLCommon::Metrics::IC_Type>(ic_type),
order.complexity(),
batch_size,
n_obs - order.n_diff(),
stream);
ML::POP_RANGE();
}
/**
* Test that the parameters are valid for the inverse transform
*
* @tparam isAr Are these (S)AR or (S)MA parameters?
* @param[in] params Parameters
* @param[in] pq p for AR, q for MA, P for SAR, Q for SMA
*/
template <bool isAr>
DI bool test_invparams(const double* params, int pq)
{
double new_params[4];
double tmp[4];
constexpr double coef = isAr ? 1 : -1;
for (int i = 0; i < pq; i++) {
tmp[i] = params[i];
new_params[i] = tmp[i];
}
// Perform inverse transform and stop before atanh step
for (int j = pq - 1; j > 0; --j) {
double a = new_params[j];
for (int k = 0; k < j; ++k) {
tmp[k] = (new_params[k] + coef * a * new_params[j - k - 1]) / (1 - (a * a));
}
for (int iter = 0; iter < j; ++iter) {
new_params[iter] = tmp[iter];
}
}
// Verify that the values are between -1 and 1
bool result = true;
for (int i = 0; i < pq; i++) {
result = result && !(new_params[i] <= -1 || new_params[i] >= 1);
}
return result;
}
/**
 * Auxiliary function of _start_params: least-squares approximation of an
* ARMA model (with or without seasonality)
* @note: in this function the non-seasonal case has s=1, not s=0!
*/
void _arma_least_squares(raft::handle_t& handle,
double* d_ar,
double* d_ma,
double* d_sigma2,
const MLCommon::LinAlg::Batched::Matrix<double>& bm_y,
int p,
int q,
int s,
bool estimate_sigma2,
int k = 0,
double* d_mu = nullptr)
{
const auto& handle_impl = handle;
auto stream = handle_impl.get_stream();
auto cublas_handle = handle_impl.get_cublas_handle();
auto allocator = handle_impl.get_device_allocator();
auto counting = thrust::make_counting_iterator(0);
int batch_size = bm_y.batches();
int n_obs = bm_y.shape().first;
int ps = p * s, qs = q * s;
int p_ar = ::max(ps, 2 * qs);
int r = ::max(p_ar + qs, ps);
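  // p_ar is the order of the long auxiliary AR fit whose residuals stand in
  // for the unobserved MA terms; the first r observations are dropped so that
  // every lagged matrix has the same height n_obs - r.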
if ((q && p_ar >= n_obs - p_ar) || p + q + k >= n_obs - r) {
// Too few observations for the estimate, fill with 0 (1 for sigma2)
if (k) CUDA_CHECK(hipMemsetAsync(d_mu, 0, sizeof(double) * batch_size, stream));
if (p) CUDA_CHECK(hipMemsetAsync(d_ar, 0, sizeof(double) * p * batch_size, stream));
if (q) CUDA_CHECK(hipMemsetAsync(d_ma, 0, sizeof(double) * q * batch_size, stream));
if (estimate_sigma2) {
thrust::device_ptr<double> sigma2_thrust = thrust::device_pointer_cast(d_sigma2);
thrust::fill(thrust::hip::par.on(stream), sigma2_thrust, sigma2_thrust + batch_size, 1.0);
}
return;
}
/* Matrix formed by lag matrices of y and the residuals respectively,
* side by side. The left side will be used to estimate AR, the right
* side to estimate MA */
MLCommon::LinAlg::Batched::Matrix<double> bm_ls_ar_res(
n_obs - r, p + q + k, batch_size, cublas_handle, allocator, stream, false);
int ar_offset = r - ps;
int res_offset = r - p_ar - qs;
// Get residuals from an AR(p_ar) model to estimate the MA parameters
if (q) {
// Create lagged y
int ls_height = n_obs - p_ar;
MLCommon::LinAlg::Batched::Matrix<double> bm_ls =
MLCommon::LinAlg::Batched::b_lagged_mat(bm_y, p_ar);
/* Matrix for the initial AR fit, initialized by copy of y
* (note: this is because gels works in-place ; the matrix has larger
* dimensions than the actual AR fit) */
MLCommon::LinAlg::Batched::Matrix<double> bm_ar_fit =
MLCommon::LinAlg::Batched::b_2dcopy(bm_y, p_ar, 0, ls_height, 1);
// Residual, initialized as offset y to avoid one kernel call
MLCommon::LinAlg::Batched::Matrix<double> bm_residual(bm_ar_fit);
// Initial AR fit
MLCommon::LinAlg::Batched::b_gels(bm_ls, bm_ar_fit);
// Compute residual (technically a gemv)
MLCommon::LinAlg::Batched::b_gemm(
false, false, ls_height, 1, p_ar, -1.0, bm_ls, bm_ar_fit, 1.0, bm_residual);
// Lags of the residual
MLCommon::LinAlg::Batched::b_lagged_mat(
bm_residual, bm_ls_ar_res, q, n_obs - r, res_offset, (n_obs - r) * (k + p), s);
}
// Fill the first column of the matrix with 1 if we fit an intercept
if (k) {
double* d_ls_ar_res = bm_ls_ar_res.raw_data();
thrust::for_each(
thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
double* b_ls_ar_res = d_ls_ar_res + bid * (n_obs - r) * (p + q + k);
for (int i = 0; i < n_obs - r; i++) {
b_ls_ar_res[i] = 1.0;
}
});
}
// Lags of y
MLCommon::LinAlg::Batched::b_lagged_mat(
bm_y, bm_ls_ar_res, p, n_obs - r, ar_offset, (n_obs - r) * k, s);
/* Initializing the vector for the ARMA fit
* (note: also in-place as described for AR fit) */
MLCommon::LinAlg::Batched::Matrix<double> bm_arma_fit =
MLCommon::LinAlg::Batched::b_2dcopy(bm_y, r, 0, n_obs - r, 1);
// The residuals will be computed only if sigma2 is requested
MLCommon::LinAlg::Batched::Matrix<double> bm_final_residual(
n_obs - r, 1, batch_size, cublas_handle, allocator, stream, false);
if (estimate_sigma2) {
raft::copy(
bm_final_residual.raw_data(), bm_arma_fit.raw_data(), (n_obs - r) * batch_size, stream);
}
// ARMA fit
MLCommon::LinAlg::Batched::b_gels(bm_ls_ar_res, bm_arma_fit);
// Copy the results in the parameter vectors
const double* d_arma_fit = bm_arma_fit.raw_data();
thrust::for_each(
thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
const double* b_arma_fit = d_arma_fit + bid * (n_obs - r);
if (k) { d_mu[bid] = b_arma_fit[0]; }
if (p) {
double* b_ar = d_ar + bid * p;
for (int i = 0; i < p; i++) {
b_ar[i] = b_arma_fit[i + k];
}
}
if (q) {
double* b_ma = d_ma + bid * q;
for (int i = 0; i < q; i++) {
b_ma[i] = b_arma_fit[i + p + k];
}
}
});
if (estimate_sigma2) {
// Compute final residual (technically a gemv)
MLCommon::LinAlg::Batched::b_gemm(false,
false,
n_obs - r,
1,
p + q + k,
-1.0,
bm_ls_ar_res,
bm_arma_fit,
1.0,
bm_final_residual);
// Compute variance
double* d_residual = bm_final_residual.raw_data();
thrust::for_each(
thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
double acc = 0.0;
const double* b_residual = d_residual + (n_obs - r) * bid;
for (int i = q; i < n_obs - r; i++) {
double res = b_residual[i];
acc += res * res;
}
d_sigma2[bid] = acc / static_cast<double>(n_obs - r - q);
});
}
// If (S)AR or (S)MA are not valid for the inverse transform, set them to zero
thrust::for_each(
thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
if (p) {
double* b_ar = d_ar + bid * p;
bool valid = test_invparams<true>(b_ar, p);
if (!valid) {
for (int ip = 0; ip < p; ip++)
b_ar[ip] = 0;
}
}
if (q) {
double* b_ma = d_ma + bid * q;
bool valid = test_invparams<false>(b_ma, q);
if (!valid) {
for (int iq = 0; iq < q; iq++)
b_ma[iq] = 0;
}
}
});
}
/**
* Auxiliary function of estimate_x0: compute the starting parameters for
* the series pre-processed by estimate_x0
*/
void _start_params(raft::handle_t& handle,
ARIMAParams<double>& params,
const MLCommon::LinAlg::Batched::Matrix<double>& bm_y,
const ARIMAOrder& order)
{
// Estimate an ARMA fit without seasonality
if (order.p + order.q + order.k)
_arma_least_squares(handle,
params.ar,
params.ma,
params.sigma2,
bm_y,
order.p,
order.q,
1,
true,
order.k,
params.mu);
  // Estimate a seasonal ARMA fit independently
if (order.P + order.Q)
_arma_least_squares(handle,
params.sar,
params.sma,
params.sigma2,
bm_y,
order.P,
order.Q,
order.s,
order.p + order.q + order.k == 0);
}
void estimate_x0(raft::handle_t& handle,
ARIMAParams<double>& params,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order)
{
ML::PUSH_RANGE(__func__);
const auto& handle_impl = handle;
auto stream = handle_impl.get_stream();
auto cublas_handle = handle_impl.get_cublas_handle();
auto allocator = handle_impl.get_device_allocator();
// Difference if necessary, copy otherwise
MLCommon::LinAlg::Batched::Matrix<double> bm_yd(
n_obs - order.d - order.s * order.D, 1, batch_size, cublas_handle, allocator, stream, false);
MLCommon::TimeSeries::prepare_data(
bm_yd.raw_data(), d_y, batch_size, n_obs, order.d, order.D, order.s, stream);
// Do the computation of the initial parameters
_start_params(handle, params, bm_yd, order);
ML::POP_RANGE();
}
} // namespace ML
| 519c70ad409c4625b0af38683336fb06458b016a.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuml/tsa/batched_arima.hpp>
#include <cuml/tsa/batched_kalman.hpp>
#include <raft/cudart_utils.h>
#include <common/nvtx.hpp>
#include <cuml/common/device_buffer.hpp>
#include <linalg/batched/matrix.cuh>
#include <metrics/batched/information_criterion.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/handle.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <timeSeries/arima_helpers.cuh>
namespace ML {
void pack(raft::handle_t& handle,
const ARIMAParams<double>& params,
const ARIMAOrder& order,
int batch_size,
double* param_vec)
{
const auto stream = handle.get_stream();
params.pack(order, batch_size, param_vec, stream);
}
void unpack(raft::handle_t& handle,
ARIMAParams<double>& params,
const ARIMAOrder& order,
int batch_size,
const double* param_vec)
{
const auto stream = handle.get_stream();
params.unpack(order, batch_size, param_vec, stream);
}
void batched_diff(raft::handle_t& handle,
double* d_y_diff,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order)
{
const auto stream = handle.get_stream();
MLCommon::TimeSeries::prepare_data(
d_y_diff, d_y, batch_size, n_obs, order.d, order.D, order.s, stream);
}
void predict(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
int start,
int end,
const ARIMAOrder& order,
const ARIMAParams<double>& params,
double* d_y_p,
bool pre_diff,
double level,
double* d_lower,
double* d_upper)
{
ML::PUSH_RANGE(__func__);
auto allocator = handle.get_device_allocator();
const auto stream = handle.get_stream();
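  // Difference the series up front only when the model requires differencing,
  // pre-diff is requested and no prediction intervals are needed (level == 0);
  // the Kalman filter then runs on the differenced data with d = D = 0.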
bool diff = order.need_diff() && pre_diff && level == 0;
// Prepare data
int n_obs_kf;
const double* d_y_kf;
ARIMAOrder order_after_prep = order;
if (diff) {
n_obs_kf = n_obs - order.n_diff();
MLCommon::TimeSeries::prepare_data(
arima_mem.y_diff, d_y, batch_size, n_obs, order.d, order.D, order.s, stream);
order_after_prep.d = 0;
order_after_prep.D = 0;
d_y_kf = arima_mem.y_diff;
} else {
n_obs_kf = n_obs;
d_y_kf = d_y;
}
double* d_vs = arima_mem.vs;
// Create temporary array for the forecasts
int num_steps = std::max(end - n_obs, 0);
MLCommon::device_buffer<double> fc_buffer(allocator, stream, num_steps * batch_size);
double* d_y_fc = fc_buffer.data();
// Compute the residual and forecast
std::vector<double> loglike = std::vector<double>(batch_size);
/// TODO: use device loglike to avoid useless copy ; part of #2233
batched_loglike(handle,
arima_mem,
d_y_kf,
batch_size,
n_obs_kf,
order_after_prep,
params,
loglike.data(),
d_vs,
false,
true,
MLE,
0,
num_steps,
d_y_fc,
level,
d_lower,
d_upper);
auto counting = thrust::make_counting_iterator(0);
int predict_ld = end - start;
//
// In-sample prediction
//
int res_offset = diff ? order.d + order.s * order.D : 0;
int p_start = std::max(start, res_offset);
int p_end = std::min(n_obs, end);
// The prediction loop starts by filling undefined predictions with NaN,
// then computes the predictions from the observations and residuals
if (start < n_obs) {
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
d_y_p[0] = 0.0;
for (int i = 0; i < res_offset - start; i++) {
d_y_p[bid * predict_ld + i] = nan("");
}
for (int i = p_start; i < p_end; i++) {
d_y_p[bid * predict_ld + i - start] =
d_y[bid * n_obs + i] - d_vs[bid * n_obs_kf + i - res_offset];
}
});
}
//
// Finalize out-of-sample forecast and copy in-sample predictions
//
if (num_steps) {
if (diff) {
MLCommon::TimeSeries::finalize_forecast(
d_y_fc, d_y, num_steps, batch_size, n_obs, n_obs, order.d, order.D, order.s, stream);
}
// Copy forecast in d_y_p
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
for (int i = 0; i < num_steps; i++) {
d_y_p[bid * predict_ld + n_obs - start + i] = d_y_fc[num_steps * bid + i];
}
});
/// TODO: 2D copy kernel?
}
ML::POP_RANGE();
}
/**
* Kernel to compute the sum-of-squares log-likelihood estimation
*
* @param[in] d_y Series to fit
* @param[in] d_mu mu parameters
* @param[in] d_ar AR parameters
* @param[in] d_ma MA parameters
* @param[in] d_sar Seasonal AR parameters
* @param[in] d_sma Seasonal MA parameters
* @param[out] d_loglike Evaluated log-likelihood
* @param[in] n_obs Number of observations in a time series
* @param[in] n_phi Number of phi coefficients (combined AR-SAR)
* @param[in] n_theta Number of theta coefficients (combined MA-SMA)
* @param[in] p Number of AR parameters
* @param[in] q Number of MA parameters
* @param[in] P Number of seasonal AR parameters
* @param[in] Q Number of seasonal MA parameters
* @param[in] s Seasonal period or 0
* @param[in] k Whether to use an intercept
* @param[in] start_sum At which index to start the sum
* @param[in] start_y First used y index (observation)
* @param[in] start_v First used v index (residual)
*/
template <typename DataT>
__global__ void sum_of_squares_kernel(const DataT* d_y,
const DataT* d_mu,
const DataT* d_ar,
const DataT* d_ma,
const DataT* d_sar,
const DataT* d_sma,
DataT* d_loglike,
int n_obs,
int n_phi,
int n_theta,
int p,
int q,
int P,
int Q,
int s,
int k,
int start_sum,
int start_y,
int start_v)
{
// Load phi, theta and mu to registers
DataT phi, theta;
if (threadIdx.x < n_phi) {
phi = MLCommon::TimeSeries::reduced_polynomial<true>(
blockIdx.x, d_ar, p, d_sar, P, s, threadIdx.x + 1);
}
if (threadIdx.x < n_theta) {
theta = MLCommon::TimeSeries::reduced_polynomial<false>(
blockIdx.x, d_ma, q, d_sma, Q, s, threadIdx.x + 1);
}
DataT mu = k ? d_mu[blockIdx.x] : (DataT)0;
// Shared memory: load y and initialize the residuals
extern __shared__ DataT shared_mem[];
DataT* b_y = shared_mem;
DataT* b_vs = shared_mem + n_obs - start_y;
for (int i = threadIdx.x; i < n_obs - start_y; i += blockDim.x) {
b_y[i] = d_y[n_obs * blockIdx.x + i + start_y];
}
for (int i = threadIdx.x; i < start_sum - start_v; i += blockDim.x) {
b_vs[i] = (DataT)0;
}
// Main loop
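  // Each thread owns one AR lag (phi) and one MA lag (theta). At every time step
  // the lagged contributions are combined with a block reduction and thread 0
  // closes the recursion v_t = y_t - mu - sum_i(phi_i*y_{t-i}) - sum_j(theta_j*v_{t-j}),
  // accumulating the sum of squared residuals.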
char* temp_smem = (char*)(shared_mem + 2 * n_obs - start_y - start_v);
DataT res, ssq = 0;
for (int i = start_sum; i < n_obs; i++) {
__syncthreads();
res = (DataT)0;
res -= threadIdx.x < n_phi ? phi * b_y[i - threadIdx.x - 1 - start_y] : (DataT)0;
res -= threadIdx.x < n_theta ? theta * b_vs[i - threadIdx.x - 1 - start_v] : (DataT)0;
res = raft::blockReduce(res, temp_smem);
if (threadIdx.x == 0) {
res += b_y[i - start_y] - mu;
b_vs[i - start_v] = res;
ssq += res * res;
}
}
// Compute log-likelihood and write it to global memory
if (threadIdx.x == 0) {
d_loglike[blockIdx.x] =
-0.5 * static_cast<DataT>(n_obs) * raft::myLog(ssq / static_cast<DataT>(n_obs - start_sum));
}
}
/**
* Sum-of-squares estimation method
*
* @param[in] handle cuML handle
* @param[in] d_y Series to fit: shape = (n_obs, batch_size)
* @param[in] batch_size Number of time series
* @param[in] n_obs Number of observations in a time series
* @param[in] order ARIMA hyper-parameters
* @param[in] Tparams Transformed parameters
* @param[out] d_loglike Evaluated log-likelihood (device)
* @param[in] truncate Number of observations to skip in the sum
*/
void conditional_sum_of_squares(raft::handle_t& handle,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const ARIMAParams<double>& Tparams,
double* d_loglike,
int truncate)
{
ML::PUSH_RANGE(__func__);
auto stream = handle.get_stream();
int n_phi = order.n_phi();
int n_theta = order.n_theta();
int max_lags = std::max(n_phi, n_theta);
int start_sum = std::max(max_lags, truncate);
int start_y = start_sum - n_phi;
int start_v = start_sum - n_theta;
// Compute the sum-of-squares and the log-likelihood
int n_warps = std::max(raft::ceildiv<int>(max_lags, 32), 1);
size_t shared_mem_size = (2 * n_obs - start_y - start_v + n_warps) * sizeof(double);
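  // Shared memory layout per block: (n_obs - start_y) observations,
  // (n_obs - start_v) residuals, and n_warps doubles of reduction scratch.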
sum_of_squares_kernel<<<batch_size, 32 * n_warps, shared_mem_size, stream>>>(d_y,
Tparams.mu,
Tparams.ar,
Tparams.ma,
Tparams.sar,
Tparams.sma,
d_loglike,
n_obs,
n_phi,
n_theta,
order.p,
order.q,
order.P,
order.Q,
order.s,
order.k,
start_sum,
start_y,
start_v);
CUDA_CHECK(cudaPeekAtLastError());
ML::POP_RANGE();
}
void batched_loglike(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const ARIMAParams<double>& params,
double* loglike,
double* d_vs,
bool trans,
bool host_loglike,
LoglikeMethod method,
int truncate,
int fc_steps,
double* d_fc,
double level,
double* d_lower,
double* d_upper)
{
ML::PUSH_RANGE(__func__);
auto allocator = handle.get_device_allocator();
auto stream = handle.get_stream();
ARIMAParams<double> Tparams = {arima_mem.Tparams_mu,
arima_mem.Tparams_ar,
arima_mem.Tparams_ma,
arima_mem.Tparams_sar,
arima_mem.Tparams_sma,
arima_mem.Tparams_sigma2};
ASSERT(method == MLE || fc_steps == 0, "Only MLE method is valid for forecasting");
/* Create log-likelihood device array if host pointer is provided */
double* d_loglike = host_loglike ? arima_mem.loglike : loglike;
if (trans) {
MLCommon::TimeSeries::batched_jones_transform(
order, batch_size, false, params, Tparams, allocator, stream);
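    // The intercept is not touched by the stationarity/invertibility transform,
    // so reuse the original value directly.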
Tparams.mu = params.mu;
} else {
// non-transformed case: just use original parameters
Tparams = params;
}
if (method == CSS) {
conditional_sum_of_squares(handle, d_y, batch_size, n_obs, order, Tparams, d_loglike, truncate);
} else {
batched_kalman_filter(handle,
arima_mem,
d_y,
n_obs,
Tparams,
order,
batch_size,
d_loglike,
d_vs,
fc_steps,
d_fc,
level,
d_lower,
d_upper);
}
if (host_loglike) {
    /* Transfer log-likelihood device -> host */
raft::update_host(loglike, d_loglike, batch_size, stream);
}
ML::POP_RANGE();
}
void batched_loglike(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const double* d_params,
double* loglike,
double* d_vs,
bool trans,
bool host_loglike,
LoglikeMethod method,
int truncate,
int fc_steps,
double* d_fc,
double level,
double* d_lower,
double* d_upper)
{
ML::PUSH_RANGE(__func__);
// unpack parameters
auto allocator = handle.get_device_allocator();
auto stream = handle.get_stream();
ARIMAParams<double> params = {arima_mem.params_mu,
arima_mem.params_ar,
arima_mem.params_ma,
arima_mem.params_sar,
arima_mem.params_sma,
arima_mem.params_sigma2};
params.unpack(order, batch_size, d_params, stream);
batched_loglike(handle,
arima_mem,
d_y,
batch_size,
n_obs,
order,
params,
loglike,
d_vs,
trans,
host_loglike,
method,
truncate,
fc_steps,
d_fc,
level,
d_lower,
d_upper);
ML::POP_RANGE();
}
void batched_loglike_grad(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const double* d_x,
double* d_grad,
double h,
bool trans,
LoglikeMethod method,
int truncate)
{
ML::PUSH_RANGE(__func__);
auto allocator = handle.get_device_allocator();
auto stream = handle.get_stream();
auto counting = thrust::make_counting_iterator(0);
int N = order.complexity();
// Initialize the perturbed x vector
double* d_x_pert = arima_mem.x_pert;
raft::copy(d_x_pert, d_x, N * batch_size, stream);
double* d_vs = arima_mem.vs;
double* d_ll_base = arima_mem.loglike_base;
double* d_ll_pert = arima_mem.loglike_pert;
// Evaluate the log-likelihood with the given parameter vector
batched_loglike(handle,
arima_mem,
d_y,
batch_size,
n_obs,
order,
d_x,
d_ll_base,
d_vs,
trans,
false,
method,
truncate);
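  // Gradient by forward finite differences, one parameter at a time:
  // g_i ~ (loglike(x + h*e_i) - loglike(x)) / h, evaluated for every batch member.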
for (int i = 0; i < N; i++) {
// Add the perturbation to the i-th parameter
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
d_x_pert[N * bid + i] = d_x[N * bid + i] + h;
});
// Evaluate the log-likelihood with the positive perturbation
batched_loglike(handle,
arima_mem,
d_y,
batch_size,
n_obs,
order,
d_x_pert,
d_ll_pert,
d_vs,
trans,
false,
method,
truncate);
// First derivative with a first-order accuracy
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
d_grad[N * bid + i] = (d_ll_pert[bid] - d_ll_base[bid]) / h;
});
// Reset the i-th parameter
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
d_x_pert[N * bid + i] = d_x[N * bid + i];
});
}
ML::POP_RANGE();
}
void information_criterion(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order,
const ARIMAParams<double>& params,
double* d_ic,
int ic_type)
{
ML::PUSH_RANGE(__func__);
auto allocator = handle.get_device_allocator();
auto stream = handle.get_stream();
double* d_vs = arima_mem.vs;
/* Compute log-likelihood in d_ic */
batched_loglike(
handle, arima_mem, d_y, batch_size, n_obs, order, params, d_ic, d_vs, false, false, MLE);
/* Compute information criterion from log-likelihood and base term */
MLCommon::Metrics::Batched::information_criterion(
d_ic,
d_ic,
static_cast<MLCommon::Metrics::IC_Type>(ic_type),
order.complexity(),
batch_size,
n_obs - order.n_diff(),
stream);
ML::POP_RANGE();
}
/**
* Test that the parameters are valid for the inverse transform
*
* @tparam isAr Are these (S)AR or (S)MA parameters?
* @param[in] params Parameters
* @param[in] pq p for AR, q for MA, P for SAR, Q for SMA
*/
template <bool isAr>
DI bool test_invparams(const double* params, int pq)
{
double new_params[4];
double tmp[4];
constexpr double coef = isAr ? 1 : -1;
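  // Note: the fixed-size buffers assume pq <= 4. The loop below reverses the
  // transform; the parameters are accepted iff every inverse-transformed
  // coefficient lies strictly inside (-1, 1).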
for (int i = 0; i < pq; i++) {
tmp[i] = params[i];
new_params[i] = tmp[i];
}
// Perform inverse transform and stop before atanh step
for (int j = pq - 1; j > 0; --j) {
double a = new_params[j];
for (int k = 0; k < j; ++k) {
tmp[k] = (new_params[k] + coef * a * new_params[j - k - 1]) / (1 - (a * a));
}
for (int iter = 0; iter < j; ++iter) {
new_params[iter] = tmp[iter];
}
}
// Verify that the values are between -1 and 1
bool result = true;
for (int i = 0; i < pq; i++) {
result = result && !(new_params[i] <= -1 || new_params[i] >= 1);
}
return result;
}
/**
 * Auxiliary function of _start_params: least-squares approximation of an
* ARMA model (with or without seasonality)
* @note: in this function the non-seasonal case has s=1, not s=0!
*/
void _arma_least_squares(raft::handle_t& handle,
double* d_ar,
double* d_ma,
double* d_sigma2,
const MLCommon::LinAlg::Batched::Matrix<double>& bm_y,
int p,
int q,
int s,
bool estimate_sigma2,
int k = 0,
double* d_mu = nullptr)
{
const auto& handle_impl = handle;
auto stream = handle_impl.get_stream();
auto cublas_handle = handle_impl.get_cublas_handle();
auto allocator = handle_impl.get_device_allocator();
auto counting = thrust::make_counting_iterator(0);
int batch_size = bm_y.batches();
int n_obs = bm_y.shape().first;
int ps = p * s, qs = q * s;
int p_ar = std::max(ps, 2 * qs);
int r = std::max(p_ar + qs, ps);
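  // Two-stage least squares (Hannan-Rissanen-style): a long AR(p_ar) fit first
  // provides residual estimates, then y is regressed on its own lags and on the
  // lagged residuals to obtain the AR and MA coefficients.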
if ((q && p_ar >= n_obs - p_ar) || p + q + k >= n_obs - r) {
// Too few observations for the estimate, fill with 0 (1 for sigma2)
if (k) CUDA_CHECK(cudaMemsetAsync(d_mu, 0, sizeof(double) * batch_size, stream));
if (p) CUDA_CHECK(cudaMemsetAsync(d_ar, 0, sizeof(double) * p * batch_size, stream));
if (q) CUDA_CHECK(cudaMemsetAsync(d_ma, 0, sizeof(double) * q * batch_size, stream));
if (estimate_sigma2) {
thrust::device_ptr<double> sigma2_thrust = thrust::device_pointer_cast(d_sigma2);
thrust::fill(thrust::cuda::par.on(stream), sigma2_thrust, sigma2_thrust + batch_size, 1.0);
}
return;
}
/* Matrix formed by lag matrices of y and the residuals respectively,
* side by side. The left side will be used to estimate AR, the right
* side to estimate MA */
MLCommon::LinAlg::Batched::Matrix<double> bm_ls_ar_res(
n_obs - r, p + q + k, batch_size, cublas_handle, allocator, stream, false);
int ar_offset = r - ps;
int res_offset = r - p_ar - qs;
// Get residuals from an AR(p_ar) model to estimate the MA parameters
if (q) {
// Create lagged y
int ls_height = n_obs - p_ar;
MLCommon::LinAlg::Batched::Matrix<double> bm_ls =
MLCommon::LinAlg::Batched::b_lagged_mat(bm_y, p_ar);
/* Matrix for the initial AR fit, initialized by copy of y
* (note: this is because gels works in-place ; the matrix has larger
* dimensions than the actual AR fit) */
MLCommon::LinAlg::Batched::Matrix<double> bm_ar_fit =
MLCommon::LinAlg::Batched::b_2dcopy(bm_y, p_ar, 0, ls_height, 1);
// Residual, initialized as offset y to avoid one kernel call
MLCommon::LinAlg::Batched::Matrix<double> bm_residual(bm_ar_fit);
// Initial AR fit
MLCommon::LinAlg::Batched::b_gels(bm_ls, bm_ar_fit);
// Compute residual (technically a gemv)
MLCommon::LinAlg::Batched::b_gemm(
false, false, ls_height, 1, p_ar, -1.0, bm_ls, bm_ar_fit, 1.0, bm_residual);
// Lags of the residual
MLCommon::LinAlg::Batched::b_lagged_mat(
bm_residual, bm_ls_ar_res, q, n_obs - r, res_offset, (n_obs - r) * (k + p), s);
}
// Fill the first column of the matrix with 1 if we fit an intercept
if (k) {
double* d_ls_ar_res = bm_ls_ar_res.raw_data();
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
double* b_ls_ar_res = d_ls_ar_res + bid * (n_obs - r) * (p + q + k);
for (int i = 0; i < n_obs - r; i++) {
b_ls_ar_res[i] = 1.0;
}
});
}
// Lags of y
MLCommon::LinAlg::Batched::b_lagged_mat(
bm_y, bm_ls_ar_res, p, n_obs - r, ar_offset, (n_obs - r) * k, s);
/* Initializing the vector for the ARMA fit
* (note: also in-place as described for AR fit) */
MLCommon::LinAlg::Batched::Matrix<double> bm_arma_fit =
MLCommon::LinAlg::Batched::b_2dcopy(bm_y, r, 0, n_obs - r, 1);
// The residuals will be computed only if sigma2 is requested
MLCommon::LinAlg::Batched::Matrix<double> bm_final_residual(
n_obs - r, 1, batch_size, cublas_handle, allocator, stream, false);
if (estimate_sigma2) {
raft::copy(
bm_final_residual.raw_data(), bm_arma_fit.raw_data(), (n_obs - r) * batch_size, stream);
}
// ARMA fit
MLCommon::LinAlg::Batched::b_gels(bm_ls_ar_res, bm_arma_fit);
// Copy the results in the parameter vectors
const double* d_arma_fit = bm_arma_fit.raw_data();
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
const double* b_arma_fit = d_arma_fit + bid * (n_obs - r);
if (k) { d_mu[bid] = b_arma_fit[0]; }
if (p) {
double* b_ar = d_ar + bid * p;
for (int i = 0; i < p; i++) {
b_ar[i] = b_arma_fit[i + k];
}
}
if (q) {
double* b_ma = d_ma + bid * q;
for (int i = 0; i < q; i++) {
b_ma[i] = b_arma_fit[i + p + k];
}
}
});
if (estimate_sigma2) {
// Compute final residual (technically a gemv)
MLCommon::LinAlg::Batched::b_gemm(false,
false,
n_obs - r,
1,
p + q + k,
-1.0,
bm_ls_ar_res,
bm_arma_fit,
1.0,
bm_final_residual);
// Compute variance
double* d_residual = bm_final_residual.raw_data();
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
double acc = 0.0;
const double* b_residual = d_residual + (n_obs - r) * bid;
for (int i = q; i < n_obs - r; i++) {
double res = b_residual[i];
acc += res * res;
}
d_sigma2[bid] = acc / static_cast<double>(n_obs - r - q);
});
}
// If (S)AR or (S)MA are not valid for the inverse transform, set them to zero
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
if (p) {
double* b_ar = d_ar + bid * p;
bool valid = test_invparams<true>(b_ar, p);
if (!valid) {
for (int ip = 0; ip < p; ip++)
b_ar[ip] = 0;
}
}
if (q) {
double* b_ma = d_ma + bid * q;
bool valid = test_invparams<false>(b_ma, q);
if (!valid) {
for (int iq = 0; iq < q; iq++)
b_ma[iq] = 0;
}
}
});
}
/**
* Auxiliary function of estimate_x0: compute the starting parameters for
* the series pre-processed by estimate_x0
*/
void _start_params(raft::handle_t& handle,
ARIMAParams<double>& params,
const MLCommon::LinAlg::Batched::Matrix<double>& bm_y,
const ARIMAOrder& order)
{
// Estimate an ARMA fit without seasonality
if (order.p + order.q + order.k)
_arma_least_squares(handle,
params.ar,
params.ma,
params.sigma2,
bm_y,
order.p,
order.q,
1,
true,
order.k,
params.mu);
  // Estimate a seasonal ARMA fit independently
if (order.P + order.Q)
_arma_least_squares(handle,
params.sar,
params.sma,
params.sigma2,
bm_y,
order.P,
order.Q,
order.s,
order.p + order.q + order.k == 0);
}
void estimate_x0(raft::handle_t& handle,
ARIMAParams<double>& params,
const double* d_y,
int batch_size,
int n_obs,
const ARIMAOrder& order)
{
ML::PUSH_RANGE(__func__);
const auto& handle_impl = handle;
auto stream = handle_impl.get_stream();
auto cublas_handle = handle_impl.get_cublas_handle();
auto allocator = handle_impl.get_device_allocator();
// Difference if necessary, copy otherwise
MLCommon::LinAlg::Batched::Matrix<double> bm_yd(
n_obs - order.d - order.s * order.D, 1, batch_size, cublas_handle, allocator, stream, false);
MLCommon::TimeSeries::prepare_data(
bm_yd.raw_data(), d_y, batch_size, n_obs, order.d, order.D, order.s, stream);
// Do the computation of the initial parameters
_start_params(handle, params, bm_yd, order);
ML::POP_RANGE();
}
} // namespace ML
|
5cc3482d72f358a8af654443f6853c639b6994cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<cuda_runtime.h>
#include<time.h>
using std::cout;
using std::endl;
__global__ void GPUFrameProcessor(unsigned char* FrameA,unsigned char* FrameB,unsigned char* Frame,unsigned char* Gray,unsigned char* Bin,unsigned char* Ero,unsigned char* Dil,unsigned char* ExA,unsigned char* ExB,unsigned char* ExC,unsigned char* ExD,unsigned char* ExE,unsigned char* ExF,unsigned char* FrameF,int width,int height,int colorWidthStep, int grayWidthStep)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
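	// Per-pixel pipeline: frame difference -> grayscale -> threshold (>220) ->
	// three erosion passes -> six 7x7 dilation passes producing the final motion mask.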
if((xIndex>2) && (yIndex>2) && (xIndex<width-2) && (yIndex<height-2))
{
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
const int gray_tid = yIndex * grayWidthStep + xIndex;
Frame[color_tid]=FrameB[color_tid]-FrameA[color_tid];
Frame[color_tid+1]=FrameB[color_tid+1]-FrameA[color_tid+1];
Frame[color_tid+2]=FrameB[color_tid+2]-FrameA[color_tid+2];
__syncthreads();
const unsigned char blue = Frame[color_tid];
const unsigned char green = Frame[color_tid + 1];
const unsigned char red = Frame[color_tid + 2];
const float gray = red * 0.3f + green * 0.59f + blue * 0.11f;
Gray[gray_tid] = static_cast<unsigned char>(gray);
__syncthreads();
if(Gray[gray_tid]>220)
Bin[gray_tid]=255;
else
Bin[gray_tid]=0;
__syncthreads();
const int tidA = (yIndex) * grayWidthStep + (xIndex); // x , y
const int tidB = (yIndex-1) * grayWidthStep + (xIndex); // x , y-1
//const int tidC = (yIndex+1) * grayWidthStep + (xIndex); // x , y+1
const int tidD = (yIndex) * grayWidthStep + (xIndex-1); // x-1 , y
const int tidE = (yIndex) * grayWidthStep + (xIndex+1); // x+1 , y
const int tidF = (yIndex-1) * grayWidthStep + (xIndex-1); // x-1 , y-1
const int tidG = (yIndex-1) * grayWidthStep + (xIndex+1); // x+1 , y-1
//const int tidH = (yIndex+1) * grayWidthStep + (xIndex-1); // x-1 , y+1
//const int tidI = (yIndex+1) * grayWidthStep + (xIndex+1); // x+1 , y+1
/*const int tidJ = (yIndex) * grayWidthStep + (xIndex-2); // x-2 , y
const int tidK = (yIndex) * grayWidthStep + (xIndex+2); // x+2 , y
const int tidL = (yIndex-1) * grayWidthStep + (xIndex-2); // x-2 , y-1
const int tidM = (yIndex-1) * grayWidthStep + (xIndex+2); // x+2 , y-1
const int tidN = (yIndex+1) * grayWidthStep + (xIndex-2); // x-1 , y+1
const int tidO = (yIndex+1) * grayWidthStep + (xIndex+2); // x+1 , y+1
const int tidP = (yIndex-2) * grayWidthStep + (xIndex-1); // x-1 , y-1
const int tidQ = (yIndex-2) * grayWidthStep + (xIndex); // x , y-2
const int tidR = (yIndex-2) * grayWidthStep + (xIndex+1); // x+1 , y-2
const int tidS = (yIndex+2) * grayWidthStep + (xIndex-1); // x+2 , y-1
const int tidT = (yIndex+2) * grayWidthStep + (xIndex); // x-1 , y+1
const int tidU = (yIndex+2) * grayWidthStep + (xIndex+1); // x+1 , y+1
*/
/**/
if((Bin[tidA]>100)&&(Bin[tidB]>100)&&(Bin[tidD]>100)&&(Bin[tidE]>100)&&(Bin[tidG]>100)&&(Bin[tidF]>100))
Ero[gray_tid]=255;
else
Ero[gray_tid]=0;
__syncthreads();
if((Ero[tidA]>100)&&(Ero[tidB]>100)&&(Ero[tidD]>100)&&(Ero[tidE]>100)&&(Ero[tidG]>100)&&(Ero[tidF]>100))
Dil[gray_tid]=255;
else
Dil[gray_tid]=0;
__syncthreads();
if((Dil[tidA]>100)&&(Dil[tidB]>100)&&(Dil[tidD]>100)&&(Dil[tidE]>100)&&(Dil[tidG]>100)&&(Dil[tidF]>100))
ExA[gray_tid]=255;
else
ExA[gray_tid]=0;
__syncthreads();
int i,j;
float Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExA[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExB[tidA]=255;
else
ExB[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if( ExB[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExC[tidA]=255;
else
ExC[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExC[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExD[tidA]=255;
else
ExD[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExD[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExE[tidA]=255;
else
ExE[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExE[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExF[tidA]=255;
else
ExF[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExF[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
FrameF[tidA]=255;
else
FrameF[tidA]=0;
}
}
void GPUResourceManager(const cv::Mat& FrameA, const cv::Mat& FrameB, cv::Mat& FrameFinal)
{
const int colorBytes = FrameA.step * FrameA.rows;
const int grayBytes = FrameFinal.step * FrameFinal.rows;
unsigned char *D_FrameA, *D_FrameB, *D_Frame;
unsigned char *D_Gray, *D_Bin, *D_Ero, *D_Dil, *D_ExA, *D_ExB, *D_ExC,*D_ExD,*D_ExE,*D_ExF,*D_FrameFinal;
hipMalloc<unsigned char>(&D_FrameA,colorBytes);
hipMalloc<unsigned char>(&D_FrameB,colorBytes);
hipMalloc<unsigned char>(&D_Frame,colorBytes);
hipMalloc<unsigned char>(&D_Gray,grayBytes);
hipMalloc<unsigned char>(&D_Bin,grayBytes);
hipMalloc<unsigned char>(&D_Ero,grayBytes);
hipMalloc<unsigned char>(&D_Dil,grayBytes);
hipMalloc<unsigned char>(&D_ExA,grayBytes);
hipMalloc<unsigned char>(&D_ExB,grayBytes);
hipMalloc<unsigned char>(&D_ExC,grayBytes);
hipMalloc<unsigned char>(&D_ExD,grayBytes);
hipMalloc<unsigned char>(&D_ExE,grayBytes);
hipMalloc<unsigned char>(&D_ExF,grayBytes);
hipMalloc<unsigned char>(&D_FrameFinal,grayBytes);
hipMemcpy(D_FrameA, FrameA.ptr(),colorBytes,hipMemcpyHostToDevice);
hipMemcpy(D_FrameB, FrameB.ptr(),colorBytes,hipMemcpyHostToDevice);
const dim3 block(32,32);
const dim3 grid((FrameA.cols + block.x - 1)/block.x, (FrameA.rows + block.y - 1)/block.y);
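	// One thread per pixel: 32x32 thread blocks, grid rounded up to cover the whole frame.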
hipLaunchKernelGGL(( GPUFrameProcessor), dim3(grid),dim3(block), 0, 0, D_FrameA,D_FrameB,D_Frame,D_Gray,D_Bin,D_Ero,D_Dil,D_ExA,D_ExB,D_ExC,D_ExD,D_ExE,D_ExF,
D_FrameFinal,FrameA.cols,FrameA.rows,FrameA.step,FrameFinal.step);
hipDeviceSynchronize();
hipMemcpy(FrameFinal.ptr(),D_FrameFinal,grayBytes,hipMemcpyDeviceToHost);
hipFree(D_FrameA);
hipFree(D_FrameB);
hipFree(D_Frame);
hipFree(D_Gray);
hipFree(D_Bin);
hipFree(D_Ero);
hipFree(D_Dil);
hipFree(D_ExA);
hipFree(D_ExB);
hipFree(D_ExC);
hipFree(D_ExD);
hipFree(D_ExE);
hipFree(D_ExF);
hipFree(D_FrameFinal);
}
using namespace std;
using namespace cv;
void DrawContour(const cv::Mat& FrameFinal, cv::Mat& InputA)
{
RNG rng(12345);
vector<vector<Point> >contours;
vector<Vec4i>hierarchy;
findContours(FrameFinal,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_SIMPLE,Point(0, 0));
vector<vector<Point> > contours_poly(contours.size());
vector<Rect>boundRect(contours.size());
vector<Point2f>center(contours.size());
vector<float>radius(contours.size());
for(int i=0;i<contours.size();i++)
{
approxPolyDP(Mat(contours[i]),contours_poly[i],3,true);
boundRect[i]=boundingRect(Mat(contours_poly[i]));
minEnclosingCircle((Mat)contours_poly[i],center[i],radius[i]);
}
for(int i=0;i<contours.size();i++)
{
Scalar color = Scalar(0,0,255);
rectangle(InputA,boundRect[i].tl(),boundRect[i].br(),color,2,8,0);
}
}
int main(int argc, char** argv)
{
Mat InputA;
Mat InputB;
Mat InputC;
Mat InputD;
cv::VideoCapture capA;
cv::VideoCapture capB;
capA.open(0);
capB.open(1);
clock_t Time_Start, Time_End, Time_Difference;
double Time;
while(1)
{
capA>>InputA;
capA>>InputB;
capB>>InputC;
capB>>InputD;
cv::Mat FrameFinalA(InputA.rows,InputA.cols,CV_8U);
cv::Mat FrameFinalB(InputA.rows,InputA.cols,CV_8U);
Time_Start=clock();
GPUResourceManager(InputA,InputB,FrameFinalA);
GPUResourceManager(InputC,InputD,FrameFinalB);
Time_End=clock();
Time_Difference=Time_End-Time_Start;
Time=Time_Difference/(double)CLOCKS_PER_SEC ;
printf ("GPU Frame Rate = %f FPS\n",2/Time);
DrawContour(FrameFinalA,InputA);
DrawContour(FrameFinalB,InputC);
cv::imshow("GPU Tracking CAM A",InputA);
cv::imshow("GPU Tracking CAM B",InputC);
if(cv::waitKey(33)>=0) break;
}
return 0;
}
| 5cc3482d72f358a8af654443f6853c639b6994cc.cu | #include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<cuda_runtime.h>
#include<time.h>
using std::cout;
using std::endl;
__global__ void GPUFrameProcessor(unsigned char* FrameA,unsigned char* FrameB,unsigned char* Frame,unsigned char* Gray,unsigned char* Bin,unsigned char* Ero,unsigned char* Dil,unsigned char* ExA,unsigned char* ExB,unsigned char* ExC,unsigned char* ExD,unsigned char* ExE,unsigned char* ExF,unsigned char* FrameF,int width,int height,int colorWidthStep, int grayWidthStep)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if((xIndex>2) && (yIndex>2) && (xIndex<width-2) && (yIndex<height-2))
{
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
const int gray_tid = yIndex * grayWidthStep + xIndex;
Frame[color_tid]=FrameB[color_tid]-FrameA[color_tid];
Frame[color_tid+1]=FrameB[color_tid+1]-FrameA[color_tid+1];
Frame[color_tid+2]=FrameB[color_tid+2]-FrameA[color_tid+2];
__syncthreads();
const unsigned char blue = Frame[color_tid];
const unsigned char green = Frame[color_tid + 1];
const unsigned char red = Frame[color_tid + 2];
const float gray = red * 0.3f + green * 0.59f + blue * 0.11f;
Gray[gray_tid] = static_cast<unsigned char>(gray);
__syncthreads();
if(Gray[gray_tid]>220)
Bin[gray_tid]=255;
else
Bin[gray_tid]=0;
__syncthreads();
const int tidA = (yIndex) * grayWidthStep + (xIndex); // x , y
const int tidB = (yIndex-1) * grayWidthStep + (xIndex); // x , y-1
//const int tidC = (yIndex+1) * grayWidthStep + (xIndex); // x , y+1
const int tidD = (yIndex) * grayWidthStep + (xIndex-1); // x-1 , y
const int tidE = (yIndex) * grayWidthStep + (xIndex+1); // x+1 , y
const int tidF = (yIndex-1) * grayWidthStep + (xIndex-1); // x-1 , y-1
const int tidG = (yIndex-1) * grayWidthStep + (xIndex+1); // x+1 , y-1
//const int tidH = (yIndex+1) * grayWidthStep + (xIndex-1); // x-1 , y+1
//const int tidI = (yIndex+1) * grayWidthStep + (xIndex+1); // x+1 , y+1
/*const int tidJ = (yIndex) * grayWidthStep + (xIndex-2); // x-2 , y
const int tidK = (yIndex) * grayWidthStep + (xIndex+2); // x+2 , y
const int tidL = (yIndex-1) * grayWidthStep + (xIndex-2); // x-2 , y-1
const int tidM = (yIndex-1) * grayWidthStep + (xIndex+2); // x+2 , y-1
const int tidN = (yIndex+1) * grayWidthStep + (xIndex-2); // x-1 , y+1
const int tidO = (yIndex+1) * grayWidthStep + (xIndex+2); // x+1 , y+1
const int tidP = (yIndex-2) * grayWidthStep + (xIndex-1); // x-1 , y-1
const int tidQ = (yIndex-2) * grayWidthStep + (xIndex); // x , y-2
const int tidR = (yIndex-2) * grayWidthStep + (xIndex+1); // x+1 , y-2
const int tidS = (yIndex+2) * grayWidthStep + (xIndex-1); // x+2 , y-1
const int tidT = (yIndex+2) * grayWidthStep + (xIndex); // x-1 , y+1
const int tidU = (yIndex+2) * grayWidthStep + (xIndex+1); // x+1 , y+1
*/
/**/
if((Bin[tidA]>100)&&(Bin[tidB]>100)&&(Bin[tidD]>100)&&(Bin[tidE]>100)&&(Bin[tidG]>100)&&(Bin[tidF]>100))
Ero[gray_tid]=255;
else
Ero[gray_tid]=0;
__syncthreads();
if((Ero[tidA]>100)&&(Ero[tidB]>100)&&(Ero[tidD]>100)&&(Ero[tidE]>100)&&(Ero[tidG]>100)&&(Ero[tidF]>100))
Dil[gray_tid]=255;
else
Dil[gray_tid]=0;
__syncthreads();
if((Dil[tidA]>100)&&(Dil[tidB]>100)&&(Dil[tidD]>100)&&(Dil[tidE]>100)&&(Dil[tidG]>100)&&(Dil[tidF]>100))
ExA[gray_tid]=255;
else
ExA[gray_tid]=0;
__syncthreads();
int i,j;
float Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExA[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExB[tidA]=255;
else
ExB[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if( ExB[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExC[tidA]=255;
else
ExC[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExC[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExD[tidA]=255;
else
ExD[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExD[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExE[tidA]=255;
else
ExE[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExE[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
ExF[tidA]=255;
else
ExF[tidA]=0;
__syncthreads();
Sum=0;
for(i=-3;i<=3;i++)
for(j=-3;j<=3;j++)
{
if(ExF[(yIndex+i)*grayWidthStep+(xIndex+j)]>100)
Sum++;
}
if(Sum>0)
FrameF[tidA]=255;
else
FrameF[tidA]=0;
}
}
void GPUResourceManager(const cv::Mat& FrameA, const cv::Mat& FrameB, cv::Mat& FrameFinal)
{
const int colorBytes = FrameA.step * FrameA.rows;
const int grayBytes = FrameFinal.step * FrameFinal.rows;
unsigned char *D_FrameA, *D_FrameB, *D_Frame;
unsigned char *D_Gray, *D_Bin, *D_Ero, *D_Dil, *D_ExA, *D_ExB, *D_ExC,*D_ExD,*D_ExE,*D_ExF,*D_FrameFinal;
cudaMalloc<unsigned char>(&D_FrameA,colorBytes);
cudaMalloc<unsigned char>(&D_FrameB,colorBytes);
cudaMalloc<unsigned char>(&D_Frame,colorBytes);
cudaMalloc<unsigned char>(&D_Gray,grayBytes);
cudaMalloc<unsigned char>(&D_Bin,grayBytes);
cudaMalloc<unsigned char>(&D_Ero,grayBytes);
cudaMalloc<unsigned char>(&D_Dil,grayBytes);
cudaMalloc<unsigned char>(&D_ExA,grayBytes);
cudaMalloc<unsigned char>(&D_ExB,grayBytes);
cudaMalloc<unsigned char>(&D_ExC,grayBytes);
cudaMalloc<unsigned char>(&D_ExD,grayBytes);
cudaMalloc<unsigned char>(&D_ExE,grayBytes);
cudaMalloc<unsigned char>(&D_ExF,grayBytes);
cudaMalloc<unsigned char>(&D_FrameFinal,grayBytes);
cudaMemcpy(D_FrameA, FrameA.ptr(),colorBytes,cudaMemcpyHostToDevice);
cudaMemcpy(D_FrameB, FrameB.ptr(),colorBytes,cudaMemcpyHostToDevice);
const dim3 block(32,32);
const dim3 grid((FrameA.cols + block.x - 1)/block.x, (FrameA.rows + block.y - 1)/block.y);
GPUFrameProcessor<<<grid,block>>>(D_FrameA,D_FrameB,D_Frame,D_Gray,D_Bin,D_Ero,D_Dil,D_ExA,D_ExB,D_ExC,D_ExD,D_ExE,D_ExF,
D_FrameFinal,FrameA.cols,FrameA.rows,FrameA.step,FrameFinal.step);
cudaDeviceSynchronize();
cudaMemcpy(FrameFinal.ptr(),D_FrameFinal,grayBytes,cudaMemcpyDeviceToHost);
cudaFree(D_FrameA);
cudaFree(D_FrameB);
cudaFree(D_Frame);
cudaFree(D_Gray);
cudaFree(D_Bin);
cudaFree(D_Ero);
cudaFree(D_Dil);
cudaFree(D_ExA);
cudaFree(D_ExB);
cudaFree(D_ExC);
cudaFree(D_ExD);
cudaFree(D_ExE);
cudaFree(D_ExF);
cudaFree(D_FrameFinal);
}
using namespace std;
using namespace cv;
void DrawContour(const cv::Mat& FrameFinal, cv::Mat& InputA)
{
RNG rng(12345);
vector<vector<Point> >contours;
vector<Vec4i>hierarchy;
findContours(FrameFinal,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_SIMPLE,Point(0, 0));
vector<vector<Point> > contours_poly(contours.size());
vector<Rect>boundRect(contours.size());
vector<Point2f>center(contours.size());
vector<float>radius(contours.size());
for(int i=0;i<contours.size();i++)
{
approxPolyDP(Mat(contours[i]),contours_poly[i],3,true);
boundRect[i]=boundingRect(Mat(contours_poly[i]));
minEnclosingCircle((Mat)contours_poly[i],center[i],radius[i]);
}
for(int i=0;i<contours.size();i++)
{
Scalar color = Scalar(0,0,255);
rectangle(InputA,boundRect[i].tl(),boundRect[i].br(),color,2,8,0);
}
}
int main(int argc, char** argv)
{
Mat InputA;
Mat InputB;
Mat InputC;
Mat InputD;
cv::VideoCapture capA;
cv::VideoCapture capB;
capA.open(0);
capB.open(1);
clock_t Time_Start, Time_End, Time_Difference;
double Time;
while(1)
{
capA>>InputA;
capA>>InputB;
capB>>InputC;
capB>>InputD;
cv::Mat FrameFinalA(InputA.rows,InputA.cols,CV_8U);
cv::Mat FrameFinalB(InputA.rows,InputA.cols,CV_8U);
Time_Start=clock();
GPUResourceManager(InputA,InputB,FrameFinalA);
GPUResourceManager(InputC,InputD,FrameFinalB);
Time_End=clock();
Time_Difference=Time_End-Time_Start;
Time=Time_Difference/(double)CLOCKS_PER_SEC ;
printf ("GPU Frame Rate = %f FPS\n",2/Time);
DrawContour(FrameFinalA,InputA);
DrawContour(FrameFinalB,InputC);
cv::imshow("GPU Tracking CAM A",InputA);
cv::imshow("GPU Tracking CAM B",InputC);
if(cv::waitKey(33)>=0) break;
}
return 0;
}
|
6c0f66cb257247723d5cd11eec88ba4bcbb0125d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) 2018-2020 Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "gpu.cuh"
#include "math_functions.cuh"
#include <hipsparse.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <torch/extension.h>
#include <torch/script.h>
namespace minkowski {
#define BLOCK_SIZE 128
template <typename Itype, typename Dtype>
__global__ void
unique_row2num_nonzero(const int n, Dtype *__restrict__ d_num_nonzero,
const Itype *__restrict__ unique_row_ptr,
const Dtype *__restrict__ reduced_val_ptr) {
CUDA_KERNEL_LOOP(index, n) {
d_num_nonzero[unique_row_ptr[index]] = reduced_val_ptr[index];
}
}
template <typename Itype, typename Dtype>
__global__ void inverse_val(const int n, Dtype *__restrict__ d_sorted_val,
const Itype *__restrict__ sorted_row,
const Dtype *__restrict__ reduced_val) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < n) {
Itype row = sorted_row[x];
d_sorted_val[x] = 1.0 / __ldg(&reduced_val[row]);
}
}
hipDataType getTensorCudaDataType(torch::Tensor const &self) {
hipDataType cuda_data_type;
switch (self.scalar_type()) {
case torch::ScalarType::Float:
cuda_data_type = HIP_R_32F;
break;
case torch::ScalarType::Double:
cuda_data_type = HIP_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
// template <typename th_int_type>
// torch::Tensor sort_row_col(torch::Tensor &rows, torch::Tensor &cols) {
// AT_DISPATCH_FLOATING_TYPES(vals.scalar_type(), "coo_spmm", [&] {
// }
// }
template <typename th_int_type>
torch::Tensor coo_spmm(torch::Tensor const &rows, torch::Tensor const &cols,
torch::Tensor const &vals, int64_t const dim_i,
int64_t const dim_j, torch::Tensor const &mat2,
int64_t const spmm_algorithm_id, bool const is_sorted) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "spmm sparse-dense is not supported on HIP");
#elif defined(_WIN32) || defined(_WIN64)
TORCH_CHECK(false, "spmm sparse-dense CUDA is not supported on Windows");
#elif !defined(CUDART_VERSION)
TORCH_CHECK(false, "CUDART_VERSION not defined");
#endif
constexpr bool is_int32 = std::is_same<th_int_type, int32_t>::value;
constexpr bool is_int64 = std::is_same<th_int_type, int64_t>::value;
hipsparseSpMMAlg_t mm_alg;
#if defined(CUDART_VERSION) && (CUDART_VERSION < 10010)
TORCH_CHECK(false, "spmm sparse-dense requires CUDA 10.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) && \
(CUDART_VERSION < 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = HIPSPARSE_COOMM_ALG1;
break;
case 2:
mm_alg = HIPSPARSE_COOMM_ALG2;
break;
case 3:
mm_alg = HIPSPARSE_COOMM_ALG3;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = HIPSPARSE_MM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 hipsparseSpMM requires CUDA 11.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = HIPSPARSE_SPMM_COO_ALG1;
break;
case 2:
mm_alg = HIPSPARSE_SPMM_COO_ALG2;
break;
case 3:
mm_alg = CUSPARSE_SPMM_COO_ALG3;
break;
case 4:
mm_alg = CUSPARSE_SPMM_COO_ALG4;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = CUSPARSE_SPMM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 coosort not implemented");
// coosort not supported with int64 || (is_int64 && (mm_alg ==
// CUSPARSE_SPMM_COO_ALG4)));
#endif
at::ScalarType int_scalar_type = std::is_same<th_int_type, int32_t>::value
? at::ScalarType::Int
: at::ScalarType::Long;
ASSERT(rows.scalar_type() == int_scalar_type, "int type mismatch.");
ASSERT(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
ASSERT(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
ASSERT(vals.scalar_type() == mat2.scalar_type(),
"vals and mat2 must have the same scalar type.");
ASSERT(rows.is_contiguous(), "rows must be contiguous");
ASSERT(cols.is_contiguous(), "cols must be contiguous");
ASSERT(vals.is_contiguous(), "vals must be contiguous");
ASSERT(rows.is_cuda(), "rows must be CUDA, but got CPU");
ASSERT(cols.is_cuda(), "cols must be CUDA, but got CPU");
ASSERT(vals.is_cuda(), "vals must be CUDA, but got CPU");
ASSERT(mat2.is_cuda(), "mat2 must be CUDA, but got CPU");
ASSERT(at::cuda::check_device({rows, cols, vals, mat2}),
"All inputs must be on the same device.");
ASSERT(mat2.dim() == 2, "Tensor 'mat2' must have 2 dims, but has ",
mat2.dim());
// int64_t dim_i = self.size(0);
// int64_t dim_j = self.size(1);
int64_t dim_k = mat2.size(1);
torch::Tensor result = at::zeros({dim_k, dim_i}, mat2.options());
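  // The result is allocated as (dim_k, dim_i) because cusparse writes the dense
  // output in column-major order; it is transposed back to (dim_i, dim_k) at the end.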
// Create tensors to view just the current set of matrices
int64_t const nnz = rows.numel();
if ((dim_j == 0) || (dim_k == 0) || (nnz == 0)) {
return result;
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
torch::Tensor const mat2_contig = mat2.contiguous();
// Issue 308
// auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipsparseHandle_t cusparse_handle = getCurrentCUDASparseHandle();
hipsparseSetStream(cusparse_handle, stream);
torch::Scalar beta = 0;
torch::Scalar alpha = 1;
hipDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
th_int_type *row_indices_ptr =
reinterpret_cast<th_int_type *>(rows.data_ptr());
th_int_type *col_indices_ptr =
reinterpret_cast<th_int_type *>(cols.data_ptr());
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(vals.scalar_type(), "coo_spmm", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t *values_ptr = reinterpret_cast<scalar_t *>(vals.data_ptr());
scalar_t *mat2_ptr = reinterpret_cast<scalar_t *>(mat2_contig.data_ptr());
scalar_t *result_ptr = reinterpret_cast<scalar_t *>(result.data_ptr());
th_int_type *sorted_row_ptr, *sorted_col_ptr;
scalar_t *sorted_val_ptr;
//////////////////////////////////////
// Sort the sparse matrix COO
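    // When the caller has not pre-sorted the COO entries, sort a scratch copy of
    // (row, col, val) by row; the caller's tensors are left untouched.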
LOG_DEBUG("Is sorted", is_sorted);
if (!is_sorted) {
sorted_row_ptr =
(th_int_type *)c10::hip::HIPCachingAllocator::raw_alloc(
2 * nnz * sizeof(th_int_type));
sorted_col_ptr = sorted_row_ptr + nnz;
sorted_val_ptr = (scalar_t *)c10::hip::HIPCachingAllocator::raw_alloc(
nnz * sizeof(scalar_t));
LOG_DEBUG("Allocated sorted row col val", nnz);
// Copy the indices
CUDA_CHECK(hipMemcpy(sorted_row_ptr, row_indices_ptr,
nnz * sizeof(th_int_type),
hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(sorted_col_ptr, col_indices_ptr,
nnz * sizeof(th_int_type),
hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(sorted_val_ptr, values_ptr, nnz * sizeof(scalar_t),
hipMemcpyDeviceToDevice));
thrust::sort_by_key(thrust::device, //
sorted_row_ptr, // key begin
sorted_row_ptr + nnz, // key end
thrust::make_zip_iterator( // value begin
thrust::make_tuple( //
sorted_col_ptr, //
sorted_val_ptr //
) //
));
LOG_DEBUG("sorted row", hipDeviceSynchronize());
} else {
sorted_row_ptr = row_indices_ptr;
sorted_col_ptr = col_indices_ptr;
sorted_val_ptr = values_ptr;
LOG_DEBUG("Initialized ptrs from inputs");
}
//////////////////////////////////////
size_t workspace_buffer_size = 0;
void *workspace_buffer = nullptr;
hipsparseSpMatDescr_t sparse_descr;
CUSPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr, //
dim_i, dim_j, nnz, //
reinterpret_cast<void *>(sorted_row_ptr),
reinterpret_cast<void *>(sorted_col_ptr),
reinterpret_cast<void *>(sorted_val_ptr), //
std::is_same<th_int_type, int32_t>::value ? HIPSPARSE_INDEX_32I
: HIPSPARSE_INDEX_64I,
HIPSPARSE_INDEX_BASE_ZERO, cuda_data_type));
hipsparseDnMatDescr_t dense_descr;
CUSPARSE_CHECK(hipsparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
reinterpret_cast<void *>(mat2_ptr), //
cuda_data_type, HIPSPARSE_ORDER_COL));
hipsparseDnMatDescr_t result_descr;
CUSPARSE_CHECK(hipsparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
reinterpret_cast<void *>(result_ptr), //
cuda_data_type, HIPSPARSE_ORDER_COL));
LOG_DEBUG("initialized matrices", hipGetLastError());
size_t required_workspace_buffer_size = 0;
CUSPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE, (void *)&alpha_val, sparse_descr,
dense_descr, (void *)&beta_val, result_descr, cuda_data_type, mm_alg,
&required_workspace_buffer_size));
LOG_DEBUG("Buffer size:", required_workspace_buffer_size);
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
LOG_DEBUG("hipMallocManaged");
hipMallocManaged(&workspace_buffer, workspace_buffer_size);
}
CUSPARSE_CHECK(hipsparseSpMM(cusparse_handle, //
HIPSPARSE_OPERATION_NON_TRANSPOSE, //
HIPSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha_val, //
sparse_descr, dense_descr, //
(void *)&beta_val, result_descr, //
cuda_data_type, mm_alg, workspace_buffer));
#ifdef DEBUG
LOG_DEBUG("SPMM", hipDeviceSynchronize());
CUDA_CHECK_DEBUG(hipDeviceSynchronize());
#endif
// Cleanup
CUSPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
CUSPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
CUSPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
if (!is_sorted) {
LOG_DEBUG("Dealloc");
c10::hip::HIPCachingAllocator::raw_delete((void *)sorted_row_ptr);
c10::hip::HIPCachingAllocator::raw_delete((void *)sorted_val_ptr);
}
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
LOG_DEBUG("Dealloc finished", hipDeviceSynchronize());
});
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(0, 1);
CUDA_CHECK(hipDeviceSynchronize());
return result;
}
template torch::Tensor
coo_spmm<int32_t>(torch::Tensor const &rows, torch::Tensor const &cols,
torch::Tensor const &vals, int64_t const dim_i,
int64_t const dim_j, torch::Tensor const &mat2,
int64_t const spmm_algorithm_id, bool const is_sorted);
template <typename th_int_type>
std::vector<torch::Tensor> // output, sorted rows, sorted cols, sorted vals.
coo_spmm_average(torch::Tensor const &rows, torch::Tensor const &cols,
int64_t const dim_i, int64_t const dim_j,
torch::Tensor const &mat2, int64_t const spmm_algorithm_id) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "spmm sparse-dense is not supported on HIP");
#elif defined(_WIN32) || defined(_WIN64)
TORCH_CHECK(false, "spmm sparse-dense CUDA is not supported on Windows");
#elif !defined(CUDART_VERSION)
TORCH_CHECK(false, "CUDART_VERSION not defined");
#endif
constexpr bool is_int32 = std::is_same<th_int_type, int32_t>::value;
constexpr bool is_int64 = std::is_same<th_int_type, int64_t>::value;
hipsparseSpMMAlg_t mm_alg;
#if defined(CUDART_VERSION) && (CUDART_VERSION < 10010)
TORCH_CHECK(false, "spmm sparse-dense requires CUDA 10.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) && \
(CUDART_VERSION < 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = HIPSPARSE_COOMM_ALG1;
break;
case 2:
mm_alg = HIPSPARSE_COOMM_ALG2;
break;
case 3:
mm_alg = HIPSPARSE_COOMM_ALG3;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = HIPSPARSE_MM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 hipsparseSpMM requires CUDA 11.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = HIPSPARSE_SPMM_COO_ALG1;
break;
case 2:
mm_alg = HIPSPARSE_SPMM_COO_ALG2;
break;
case 3:
mm_alg = CUSPARSE_SPMM_COO_ALG3;
break;
case 4:
mm_alg = CUSPARSE_SPMM_COO_ALG4;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = CUSPARSE_SPMM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 coosort not implemented");
// coosort not supported with int64 || (is_int64 && (mm_alg ==
// CUSPARSE_SPMM_COO_ALG4)));
#endif
at::ScalarType int_scalar_type = std::is_same<th_int_type, int32_t>::value
? at::ScalarType::Int
: at::ScalarType::Long;
hipDataType cuda_data_type = getTensorCudaDataType(mat2);
ASSERT(rows.scalar_type() == int_scalar_type, "int type mismatch.");
ASSERT(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
ASSERT(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
ASSERT(rows.is_contiguous(), "rows must be contiguous");
ASSERT(cols.is_contiguous(), "cols must be contiguous");
ASSERT(rows.is_cuda(), "rows must be CUDA, but got CPU");
ASSERT(cols.is_cuda(), "cols must be CUDA, but got CPU");
ASSERT(mat2.is_cuda(), "mat2 must be CUDA, but got CPU");
ASSERT(at::cuda::check_device({rows, cols, mat2}),
"All inputs must be on the same device.");
ASSERT(mat2.dim() == 2, "Tensor 'mat2' must have 2 dims, but has ",
mat2.dim());
// int64_t dim_i = self.size(0);
// int64_t dim_j = self.size(1);
int64_t dim_k = mat2.size(1);
// Create tensors to view just the current set of matrices
int64_t const nnz = rows.numel();
auto int_options =
torch::TensorOptions({at::kCUDA, at::hip::current_device()})
.dtype(int_scalar_type)
.requires_grad(false);
torch::Tensor result = at::zeros({dim_k, dim_i}, mat2.options());
torch::Tensor sorted_row_col = at::zeros({2, nnz}, int_options);
torch::Tensor sorted_val = at::zeros({nnz}, mat2.options());
if ((dim_j == 0) || (dim_k == 0) || (nnz == 0)) {
return {result, sorted_row_col, sorted_val};
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
torch::Tensor const mat2_contig = mat2.contiguous();
// Issue 308
// auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipsparseHandle_t cusparse_handle = getCurrentCUDASparseHandle();
hipsparseSetStream(cusparse_handle, stream);
torch::Scalar beta = 0;
torch::Scalar alpha = 1;
th_int_type *row_indices_ptr =
reinterpret_cast<th_int_type *>(rows.data_ptr());
th_int_type *col_indices_ptr =
reinterpret_cast<th_int_type *>(cols.data_ptr());
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(mat2.scalar_type(), "coo_spmm", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t *mat2_ptr = reinterpret_cast<scalar_t *>(mat2_contig.data_ptr());
scalar_t *result_ptr = reinterpret_cast<scalar_t *>(result.data_ptr());
//////////////////////////////////////
// Sort the sparse matrix COO
th_int_type *sorted_row_ptr =
reinterpret_cast<th_int_type *>(sorted_row_col.data_ptr());
th_int_type *sorted_col_ptr = sorted_row_ptr + nnz;
// Copy the indices
CUDA_CHECK(hipMemcpy(sorted_row_ptr, row_indices_ptr,
nnz * sizeof(th_int_type), hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(sorted_col_ptr, col_indices_ptr,
nnz * sizeof(th_int_type), hipMemcpyDeviceToDevice));
thrust::sort_by_key(thrust::device, //
sorted_row_ptr, // key begin
sorted_row_ptr + nnz, // key end
sorted_col_ptr);
/////////////////////////////////////////////////////////////////////////
// Create vals
/////////////////////////////////////////////////////////////////////////
th_int_type *unique_row_ptr =
(th_int_type *)c10::hip::HIPCachingAllocator::raw_alloc(
nnz * sizeof(th_int_type));
scalar_t *reduced_val_ptr =
(scalar_t *)c10::hip::HIPCachingAllocator::raw_alloc(
nnz * sizeof(scalar_t));
torch::Tensor ones = at::ones({nnz}, mat2.options());
// reduce by key
auto end = thrust::reduce_by_key(
thrust::device, // policy
sorted_row_ptr, // key begin
sorted_row_ptr + nnz, // key end
reinterpret_cast<scalar_t *>(ones.data_ptr()), // value begin
unique_row_ptr, // key out begin
reduced_val_ptr // value out begin
);
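    // reduced_val now holds the per-row nonzero counts; inverse_val then writes
    // 1/count into every entry so that the SpMM below averages each output row
    // instead of summing it.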
int num_unique_keys = end.first - unique_row_ptr;
LOG_DEBUG("Num unique keys:", num_unique_keys);
// Create values
// Copy the results to the correct output
hipLaunchKernelGGL(( inverse_val<th_int_type, scalar_t>)
, dim3(GET_BLOCKS(nnz, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0,
nnz, reinterpret_cast<scalar_t *>(sorted_val.data_ptr()),
sorted_row_ptr, reduced_val_ptr);
c10::hip::HIPCachingAllocator::raw_delete((void *)unique_row_ptr);
c10::hip::HIPCachingAllocator::raw_delete((void *)reduced_val_ptr);
/////////////////////////////////////////////////////////////////////////
size_t workspace_buffer_size = 0;
void *workspace_buffer = nullptr;
hipsparseSpMatDescr_t sparse_descr;
CUSPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr, //
dim_i, dim_j, nnz, //
reinterpret_cast<void *>(sorted_row_ptr),
reinterpret_cast<void *>(sorted_col_ptr),
reinterpret_cast<void *>(sorted_val.data_ptr()), //
std::is_same<th_int_type, int32_t>::value ? HIPSPARSE_INDEX_32I
: HIPSPARSE_INDEX_64I,
HIPSPARSE_INDEX_BASE_ZERO, cuda_data_type));
hipsparseDnMatDescr_t dense_descr;
CUSPARSE_CHECK(hipsparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
reinterpret_cast<void *>(mat2_ptr), //
cuda_data_type, HIPSPARSE_ORDER_COL));
hipsparseDnMatDescr_t result_descr;
CUSPARSE_CHECK(hipsparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
reinterpret_cast<void *>(result_ptr), //
cuda_data_type, HIPSPARSE_ORDER_COL));
size_t required_workspace_buffer_size = 0;
CUSPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE, (void *)&alpha_val, sparse_descr,
dense_descr, (void *)&beta_val, result_descr, cuda_data_type, mm_alg,
&required_workspace_buffer_size));
LOG_DEBUG("Buffer size:", required_workspace_buffer_size);
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
LOG_DEBUG("hipMallocManaged");
hipMallocManaged(&workspace_buffer, workspace_buffer_size);
}
LOG_DEBUG("SPMM");
CUSPARSE_CHECK(hipsparseSpMM(cusparse_handle, //
HIPSPARSE_OPERATION_NON_TRANSPOSE, //
HIPSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha_val, //
sparse_descr, dense_descr, //
(void *)&beta_val, result_descr, //
cuda_data_type, mm_alg, workspace_buffer));
CUSPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
CUSPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
CUSPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
});
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(0, 1);
CUDA_CHECK(hipDeviceSynchronize());
return {result, sorted_row_col, sorted_val};
}
template std::vector<torch::Tensor> // output, sorted rows_cols, sorted vals.
coo_spmm_average<int32_t>(torch::Tensor const &rows, torch::Tensor const &cols,
int64_t const dim_i, int64_t const dim_j,
torch::Tensor const &mat2,
int64_t const spmm_algorithm_id);
// template torch::Tensor
// coo_spmm<int64_t>(torch::Tensor const &rows, torch::Tensor const &cols,
// torch::Tensor const &vals, int64_t const dim_i,
// int64_t const dim_j, torch::Tensor const &mat2,
// int64_t spmm_algorithm_id);
} // namespace minkowski
| 6c0f66cb257247723d5cd11eec88ba4bcbb0125d.cu | /*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) 2018-2020 Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "gpu.cuh"
#include "math_functions.cuh"
#include <cusparse.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAUtils.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <torch/extension.h>
#include <torch/script.h>
namespace minkowski {
#define BLOCK_SIZE 128
template <typename Itype, typename Dtype>
__global__ void
unique_row2num_nonzero(const int n, Dtype *__restrict__ d_num_nonzero,
const Itype *__restrict__ unique_row_ptr,
const Dtype *__restrict__ reduced_val_ptr) {
CUDA_KERNEL_LOOP(index, n) {
d_num_nonzero[unique_row_ptr[index]] = reduced_val_ptr[index];
}
}
template <typename Itype, typename Dtype>
__global__ void inverse_val(const int n, Dtype *__restrict__ d_sorted_val,
const Itype *__restrict__ sorted_row,
const Dtype *__restrict__ reduced_val) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < n) {
Itype row = sorted_row[x];
d_sorted_val[x] = 1.0 / __ldg(&reduced_val[row]);
}
}
cudaDataType getTensorCudaDataType(torch::Tensor const &self) {
cudaDataType cuda_data_type;
switch (self.scalar_type()) {
case torch::ScalarType::Float:
cuda_data_type = CUDA_R_32F;
break;
case torch::ScalarType::Double:
cuda_data_type = CUDA_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
// template <typename th_int_type>
// torch::Tensor sort_row_col(torch::Tensor &rows, torch::Tensor &cols) {
// AT_DISPATCH_FLOATING_TYPES(vals.scalar_type(), "coo_spmm", [&] {
// }
// }
template <typename th_int_type>
torch::Tensor coo_spmm(torch::Tensor const &rows, torch::Tensor const &cols,
torch::Tensor const &vals, int64_t const dim_i,
int64_t const dim_j, torch::Tensor const &mat2,
int64_t const spmm_algorithm_id, bool const is_sorted) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "spmm sparse-dense is not supported on HIP");
#elif defined(_WIN32) || defined(_WIN64)
TORCH_CHECK(false, "spmm sparse-dense CUDA is not supported on Windows");
#elif !defined(CUDART_VERSION)
TORCH_CHECK(false, "CUDART_VERSION not defined");
#endif
constexpr bool is_int32 = std::is_same<th_int_type, int32_t>::value;
constexpr bool is_int64 = std::is_same<th_int_type, int64_t>::value;
cusparseSpMMAlg_t mm_alg;
#if defined(CUDART_VERSION) && (CUDART_VERSION < 10010)
TORCH_CHECK(false, "spmm sparse-dense requires CUDA 10.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) && \
(CUDART_VERSION < 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = CUSPARSE_COOMM_ALG1;
break;
case 2:
mm_alg = CUSPARSE_COOMM_ALG2;
break;
case 3:
mm_alg = CUSPARSE_COOMM_ALG3;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = CUSPARSE_MM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 cusparseSpMM requires CUDA 11.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = CUSPARSE_SPMM_COO_ALG1;
break;
case 2:
mm_alg = CUSPARSE_SPMM_COO_ALG2;
break;
case 3:
mm_alg = CUSPARSE_SPMM_COO_ALG3;
break;
case 4:
mm_alg = CUSPARSE_SPMM_COO_ALG4;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = CUSPARSE_SPMM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 coosort not implemented");
// coosort not supported with int64
// (earlier form of the check: "|| (is_int64 && (mm_alg == CUSPARSE_SPMM_COO_ALG4))")
#endif
at::ScalarType int_scalar_type = std::is_same<th_int_type, int32_t>::value
? at::ScalarType::Int
: at::ScalarType::Long;
ASSERT(rows.scalar_type() == int_scalar_type, "int type mismatch.");
ASSERT(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
ASSERT(vals.scalar_type() == mat2.scalar_type(),
"vals and mat2 must have the same scalar type.");
ASSERT(rows.is_contiguous(), "rows must be contiguous");
ASSERT(cols.is_contiguous(), "cols must be contiguous");
ASSERT(vals.is_contiguous(), "vals must be contiguous");
ASSERT(rows.is_cuda(), "rows must be CUDA, but got CPU");
ASSERT(cols.is_cuda(), "cols must be CUDA, but got CPU");
ASSERT(vals.is_cuda(), "vals must be CUDA, but got CPU");
ASSERT(mat2.is_cuda(), "mat2 must be CUDA, but got CPU");
ASSERT(at::cuda::check_device({rows, cols, vals, mat2}),
"All inputs must be on the same device.");
ASSERT(mat2.dim() == 2, "Tensor 'mat2' must have 2 dims, but has ",
mat2.dim());
// int64_t dim_i = self.size(0);
// int64_t dim_j = self.size(1);
int64_t dim_k = mat2.size(1);
torch::Tensor result = at::zeros({dim_k, dim_i}, mat2.options());
// Create tensors to view just the current set of matrices
int64_t const nnz = rows.numel();
if ((dim_j == 0) || (dim_k == 0) || (nnz == 0)) {
return result;
}
// Dense matrices have to be contiguous for cusparseSpMM to work
torch::Tensor const mat2_contig = mat2.contiguous();
// Issue 308
// auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
auto stream = at::cuda::getCurrentCUDAStream();
cusparseHandle_t cusparse_handle = getCurrentCUDASparseHandle();
cusparseSetStream(cusparse_handle, stream);
torch::Scalar beta = 0;
torch::Scalar alpha = 1;
cudaDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
th_int_type *row_indices_ptr =
reinterpret_cast<th_int_type *>(rows.data_ptr());
th_int_type *col_indices_ptr =
reinterpret_cast<th_int_type *>(cols.data_ptr());
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(vals.scalar_type(), "coo_spmm", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t *values_ptr = reinterpret_cast<scalar_t *>(vals.data_ptr());
scalar_t *mat2_ptr = reinterpret_cast<scalar_t *>(mat2_contig.data_ptr());
scalar_t *result_ptr = reinterpret_cast<scalar_t *>(result.data_ptr());
th_int_type *sorted_row_ptr, *sorted_col_ptr;
scalar_t *sorted_val_ptr;
//////////////////////////////////////
// Sort the sparse matrix COO
LOG_DEBUG("Is sorted", is_sorted);
if (!is_sorted) {
sorted_row_ptr =
(th_int_type *)c10::cuda::CUDACachingAllocator::raw_alloc(
2 * nnz * sizeof(th_int_type));
sorted_col_ptr = sorted_row_ptr + nnz;
sorted_val_ptr = (scalar_t *)c10::cuda::CUDACachingAllocator::raw_alloc(
nnz * sizeof(scalar_t));
LOG_DEBUG("Allocated sorted row col val", nnz);
// Copy the indices
CUDA_CHECK(cudaMemcpy(sorted_row_ptr, row_indices_ptr,
nnz * sizeof(th_int_type),
cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(sorted_col_ptr, col_indices_ptr,
nnz * sizeof(th_int_type),
cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(sorted_val_ptr, values_ptr, nnz * sizeof(scalar_t),
cudaMemcpyDeviceToDevice));
thrust::sort_by_key(thrust::device, //
sorted_row_ptr, // key begin
sorted_row_ptr + nnz, // key end
thrust::make_zip_iterator( // value begin
thrust::make_tuple( //
sorted_col_ptr, //
sorted_val_ptr //
) //
));
LOG_DEBUG("sorted row", cudaDeviceSynchronize());
} else {
sorted_row_ptr = row_indices_ptr;
sorted_col_ptr = col_indices_ptr;
sorted_val_ptr = values_ptr;
LOG_DEBUG("Initialized ptrs from inputs");
}
//////////////////////////////////////
size_t workspace_buffer_size = 0;
void *workspace_buffer = nullptr;
cusparseSpMatDescr_t sparse_descr;
CUSPARSE_CHECK(cusparseCreateCoo(
&sparse_descr, //
dim_i, dim_j, nnz, //
reinterpret_cast<void *>(sorted_row_ptr),
reinterpret_cast<void *>(sorted_col_ptr),
reinterpret_cast<void *>(sorted_val_ptr), //
std::is_same<th_int_type, int32_t>::value ? CUSPARSE_INDEX_32I
: CUSPARSE_INDEX_64I,
CUSPARSE_INDEX_BASE_ZERO, cuda_data_type));
cusparseDnMatDescr_t dense_descr;
CUSPARSE_CHECK(cusparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
reinterpret_cast<void *>(mat2_ptr), //
cuda_data_type, CUSPARSE_ORDER_COL));
cusparseDnMatDescr_t result_descr;
CUSPARSE_CHECK(cusparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
reinterpret_cast<void *>(result_ptr), //
cuda_data_type, CUSPARSE_ORDER_COL));
LOG_DEBUG("initialized matrices", cudaGetLastError());
size_t required_workspace_buffer_size = 0;
CUSPARSE_CHECK(cusparseSpMM_bufferSize(
cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE, (void *)&alpha_val, sparse_descr,
dense_descr, (void *)&beta_val, result_descr, cuda_data_type, mm_alg,
&required_workspace_buffer_size));
LOG_DEBUG("Buffer size:", required_workspace_buffer_size);
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
LOG_DEBUG("cudaMallocManaged");
cudaMallocManaged(&workspace_buffer, workspace_buffer_size);
}
CUSPARSE_CHECK(cusparseSpMM(cusparse_handle, //
CUSPARSE_OPERATION_NON_TRANSPOSE, //
CUSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha_val, //
sparse_descr, dense_descr, //
(void *)&beta_val, result_descr, //
cuda_data_type, mm_alg, workspace_buffer));
#ifdef DEBUG
LOG_DEBUG("SPMM", cudaDeviceSynchronize());
CUDA_CHECK_DEBUG(cudaDeviceSynchronize());
#endif
// Cleanup
CUSPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(result_descr));
if (!is_sorted) {
LOG_DEBUG("Dealloc");
c10::cuda::CUDACachingAllocator::raw_delete((void *)sorted_row_ptr);
c10::cuda::CUDACachingAllocator::raw_delete((void *)sorted_val_ptr);
}
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
LOG_DEBUG("Dealloc finished", cudaDeviceSynchronize());
});
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(0, 1);
CUDA_CHECK(cudaDeviceSynchronize());
return result;
}
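// Illustrative usage sketch (added; tensor contents and names are hypothetical):
// multiply a 2x3 COO sparse matrix by a 3x4 dense matrix, giving a 2x4 result.
//   auto opts_i = torch::dtype(torch::kInt32).device(torch::kCUDA);
//   auto opts_f = torch::dtype(torch::kFloat32).device(torch::kCUDA);
//   auto rows = torch::tensor({0, 0, 1}, opts_i); // already sorted by row
//   auto cols = torch::tensor({0, 2, 1}, opts_i);
//   auto vals = torch::tensor({1.f, 2.f, 3.f}, opts_f);
//   auto dense = torch::rand({3, 4}, opts_f);
//   auto out = coo_spmm<int32_t>(rows, cols, vals, /*dim_i=*/2, /*dim_j=*/3, dense,
//                                /*spmm_algorithm_id=*/1, /*is_sorted=*/true);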
template torch::Tensor
coo_spmm<int32_t>(torch::Tensor const &rows, torch::Tensor const &cols,
torch::Tensor const &vals, int64_t const dim_i,
int64_t const dim_j, torch::Tensor const &mat2,
int64_t const spmm_algorithm_id, bool const is_sorted);
template <typename th_int_type>
std::vector<torch::Tensor> // output, sorted rows, sorted cols, sorted vals.
coo_spmm_average(torch::Tensor const &rows, torch::Tensor const &cols,
int64_t const dim_i, int64_t const dim_j,
torch::Tensor const &mat2, int64_t const spmm_algorithm_id) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "spmm sparse-dense is not supported on HIP");
#elif defined(_WIN32) || defined(_WIN64)
TORCH_CHECK(false, "spmm sparse-dense CUDA is not supported on Windows");
#elif !defined(CUDART_VERSION)
TORCH_CHECK(false, "CUDART_VERSION not defined");
#endif
constexpr bool is_int32 = std::is_same<th_int_type, int32_t>::value;
constexpr bool is_int64 = std::is_same<th_int_type, int64_t>::value;
cusparseSpMMAlg_t mm_alg;
#if defined(CUDART_VERSION) && (CUDART_VERSION < 10010)
TORCH_CHECK(false, "spmm sparse-dense requires CUDA 10.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) && \
(CUDART_VERSION < 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = CUSPARSE_COOMM_ALG1;
break;
case 2:
mm_alg = CUSPARSE_COOMM_ALG2;
break;
case 3:
mm_alg = CUSPARSE_COOMM_ALG3;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = CUSPARSE_MM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 cusparseSpMM requires CUDA 11.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = CUSPARSE_SPMM_COO_ALG1;
break;
case 2:
mm_alg = CUSPARSE_SPMM_COO_ALG2;
break;
case 3:
mm_alg = CUSPARSE_SPMM_COO_ALG3;
break;
case 4:
mm_alg = CUSPARSE_SPMM_COO_ALG4;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = CUSPARSE_SPMM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 coosort not implemented");
// coosort not supported with int64
// (earlier form of the check: "|| (is_int64 && (mm_alg == CUSPARSE_SPMM_COO_ALG4))")
#endif
at::ScalarType int_scalar_type = std::is_same<th_int_type, int32_t>::value
? at::ScalarType::Int
: at::ScalarType::Long;
cudaDataType cuda_data_type = getTensorCudaDataType(mat2);
ASSERT(rows.scalar_type() == int_scalar_type, "int type mismatch.");
ASSERT(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
ASSERT(rows.is_contiguous(), "rows must be contiguous");
ASSERT(cols.is_contiguous(), "cols must be contiguous");
ASSERT(rows.is_cuda(), "rows must be CUDA, but got CPU");
ASSERT(cols.is_cuda(), "cols must be CUDA, but got CPU");
ASSERT(mat2.is_cuda(), "mat2 must be CUDA, but got CPU");
ASSERT(at::cuda::check_device({rows, cols, mat2}),
"All inputs must be on the same device.");
ASSERT(mat2.dim() == 2, "Tensor 'mat2' must have 2 dims, but has ",
mat2.dim());
// int64_t dim_i = self.size(0);
// int64_t dim_j = self.size(1);
int64_t dim_k = mat2.size(1);
// Create tensors to view just the current set of matrices
int64_t const nnz = rows.numel();
auto int_options =
torch::TensorOptions({at::kCUDA, at::cuda::current_device()})
.dtype(int_scalar_type)
.requires_grad(false);
torch::Tensor result = at::zeros({dim_k, dim_i}, mat2.options());
torch::Tensor sorted_row_col = at::zeros({2, nnz}, int_options);
torch::Tensor sorted_val = at::zeros({nnz}, mat2.options());
if ((dim_j == 0) || (dim_k == 0) || (nnz == 0)) {
return {result, sorted_row_col, sorted_val};
}
// Dense matrices have to be contiguous for cusparseSpMM to work
torch::Tensor const mat2_contig = mat2.contiguous();
// Issue 308
// auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
auto stream = at::cuda::getCurrentCUDAStream();
cusparseHandle_t cusparse_handle = getCurrentCUDASparseHandle();
cusparseSetStream(cusparse_handle, stream);
torch::Scalar beta = 0;
torch::Scalar alpha = 1;
th_int_type *row_indices_ptr =
reinterpret_cast<th_int_type *>(rows.data_ptr());
th_int_type *col_indices_ptr =
reinterpret_cast<th_int_type *>(cols.data_ptr());
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(mat2.scalar_type(), "coo_spmm", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t *mat2_ptr = reinterpret_cast<scalar_t *>(mat2_contig.data_ptr());
scalar_t *result_ptr = reinterpret_cast<scalar_t *>(result.data_ptr());
//////////////////////////////////////
// Sort the sparse matrix COO
th_int_type *sorted_row_ptr =
reinterpret_cast<th_int_type *>(sorted_row_col.data_ptr());
th_int_type *sorted_col_ptr = sorted_row_ptr + nnz;
// Copy the indices
CUDA_CHECK(cudaMemcpy(sorted_row_ptr, row_indices_ptr,
nnz * sizeof(th_int_type), cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(sorted_col_ptr, col_indices_ptr,
nnz * sizeof(th_int_type), cudaMemcpyDeviceToDevice));
thrust::sort_by_key(thrust::device, //
sorted_row_ptr, // key begin
sorted_row_ptr + nnz, // key end
sorted_col_ptr);
/////////////////////////////////////////////////////////////////////////
// Create vals
/////////////////////////////////////////////////////////////////////////
th_int_type *unique_row_ptr =
(th_int_type *)c10::cuda::CUDACachingAllocator::raw_alloc(
nnz * sizeof(th_int_type));
scalar_t *reduced_val_ptr =
(scalar_t *)c10::cuda::CUDACachingAllocator::raw_alloc(
nnz * sizeof(scalar_t));
torch::Tensor ones = at::ones({nnz}, mat2.options());
// reduce by key
auto end = thrust::reduce_by_key(
thrust::device, // policy
sorted_row_ptr, // key begin
sorted_row_ptr + nnz, // key end
reinterpret_cast<scalar_t *>(ones.data_ptr()), // value begin
unique_row_ptr, // key out begin
reduced_val_ptr // value out begin
);
int num_unique_keys = end.first - unique_row_ptr;
LOG_DEBUG("Num unique keys:", num_unique_keys);
// Create values
// Copy the results to the correct output
inverse_val<th_int_type, scalar_t>
<<<GET_BLOCKS(nnz, BLOCK_SIZE), BLOCK_SIZE>>>(
nnz, reinterpret_cast<scalar_t *>(sorted_val.data_ptr()),
sorted_row_ptr, reduced_val_ptr);
c10::cuda::CUDACachingAllocator::raw_delete((void *)unique_row_ptr);
c10::cuda::CUDACachingAllocator::raw_delete((void *)reduced_val_ptr);
/////////////////////////////////////////////////////////////////////////
size_t workspace_buffer_size = 0;
void *workspace_buffer = nullptr;
cusparseSpMatDescr_t sparse_descr;
CUSPARSE_CHECK(cusparseCreateCoo(
&sparse_descr, //
dim_i, dim_j, nnz, //
reinterpret_cast<void *>(sorted_row_ptr),
reinterpret_cast<void *>(sorted_col_ptr),
reinterpret_cast<void *>(sorted_val.data_ptr()), //
std::is_same<th_int_type, int32_t>::value ? CUSPARSE_INDEX_32I
: CUSPARSE_INDEX_64I,
CUSPARSE_INDEX_BASE_ZERO, cuda_data_type));
cusparseDnMatDescr_t dense_descr;
CUSPARSE_CHECK(cusparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
reinterpret_cast<void *>(mat2_ptr), //
cuda_data_type, CUSPARSE_ORDER_COL));
cusparseDnMatDescr_t result_descr;
CUSPARSE_CHECK(cusparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
reinterpret_cast<void *>(result_ptr), //
cuda_data_type, CUSPARSE_ORDER_COL));
size_t required_workspace_buffer_size = 0;
CUSPARSE_CHECK(cusparseSpMM_bufferSize(
cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE, (void *)&alpha_val, sparse_descr,
dense_descr, (void *)&beta_val, result_descr, cuda_data_type, mm_alg,
&required_workspace_buffer_size));
LOG_DEBUG("Buffer size:", required_workspace_buffer_size);
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
LOG_DEBUG("cudaMallocManaged");
cudaMallocManaged(&workspace_buffer, workspace_buffer_size);
}
LOG_DEBUG("SPMM");
CUSPARSE_CHECK(cusparseSpMM(cusparse_handle, //
CUSPARSE_OPERATION_NON_TRANSPOSE, //
CUSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha_val, //
sparse_descr, dense_descr, //
(void *)&beta_val, result_descr, //
cuda_data_type, mm_alg, workspace_buffer));
CUSPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(result_descr));
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
});
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(0, 1);
CUDA_CHECK(cudaDeviceSynchronize());
return {result, sorted_row_col, sorted_val};
}
template std::vector<torch::Tensor> // output, sorted rows_cols, sorted vals.
coo_spmm_average<int32_t>(torch::Tensor const &rows, torch::Tensor const &cols,
int64_t const dim_i, int64_t const dim_j,
torch::Tensor const &mat2,
int64_t const spmm_algorithm_id);
// template torch::Tensor
// coo_spmm<int64_t>(torch::Tensor const &rows, torch::Tensor const &cols,
// torch::Tensor const &vals, int64_t const dim_i,
// int64_t const dim_j, torch::Tensor const &mat2,
// int64_t spmm_algorithm_id);
} // namespace minkowski
|
28570e83a3bafc0b19d00a3da1942d9a6f7c3882.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > threshold ? 1 : 0;
}
}
template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ThresholdForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, threshold_, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(ThresholdLayer);
} // namespace caffe
| 28570e83a3bafc0b19d00a3da1942d9a6f7c3882.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > threshold ? 1 : 0;
}
}
template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, threshold_, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(ThresholdLayer);
} // namespace caffe
|
d147f120954a4a6d724f6f75e6073dfe83098ece.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled.
///////////Both the first and second iterations have almost the same latencies and miss patterns as the plain managed case.
///////////hipMemAdviseSetPreferredLocation doesn't seem to have a noticeable effect on K40.
///////////P.S. The 800s actually happen randomly. Thus it is not another condition.
//typedef unsigned char byte;
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
for (long long int i = 0; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
for (long long int i = 32; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
}
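// Illustrative note (added): init_cpu_data builds a pointer-chasing ring in index form.
// With (much smaller than real) stride = 4 and mod = 16:
//   A[0] = 4, A[4] = 8, A[8] = 12, A[12] = 0,
// so repeatedly executing j = A[j] walks 0 -> 4 -> 8 -> 12 -> 0 -> ..., and every load
// depends on the previous one, which is what makes per-access latency measurable.
// The second loop writes the same mapping again starting at offset 32 ints, i.e. one
// 128-byte cache line further in.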
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (long long int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
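// Design note (added): the per-access indices and cycle counts are staged in shared
// memory inside the timing loop and only copied out to global memory (C, D) afterwards,
// presumably so the stores themselves do not perturb the latency being measured.
// Keeping the loaded index (s_index[it] = j) also preserves a data dependence on each
// load, preventing ILP from hiding the access time.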
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id));
printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
int counter = 0;
/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
for(int data_stride = 1 * 1 * 1024; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(long long int mod2 = 1 * 16 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
long long int mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
long long int data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(hipMemAdvise(CPU_data_in, sizeof(int) * data_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
| d147f120954a4a6d724f6f75e6073dfe83098ece.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled.
///////////Both the first and second iterations have almost the same latencies and miss patterns as the plain managed case.
///////////cudaMemAdviseSetPreferredLocation doesn't seem to have a noticeable effect on K40.
///////////P.S. The 800s actually happen randomly. Thus it is not another condition.
//typedef unsigned char byte;
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
for (long long int i = 0; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
for (long long int i = 32; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (long long int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id));
printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
int counter = 0;
/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
for(int data_stride = 1 * 1 * 1024; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(long long int mod2 = 1 * 16 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
long long int mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
long long int data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(cudaMemAdvise(CPU_data_in, sizeof(int) * data_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
223f8d012d9249751b5f19645f65822ede1599d8.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_gemv_batched_strided_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl"
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 16>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 1>;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
ThreadBlockShape,
ThreadShape,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor>;
template void megdnn::cuda::cutlass_wrapper::
cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>(
BatchedGemmCoord const& problem_size,
const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a,
const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b,
typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 223f8d012d9249751b5f19645f65822ede1599d8.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_gemv_batched_strided_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl"
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 16>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 1>;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
ThreadBlockShape,
ThreadShape,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor>;
template void megdnn::cuda::cutlass_wrapper::
cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>(
BatchedGemmCoord const& problem_size,
const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a,
const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b,
typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
ed7626835ba32a6f9e77630c8175cf6a738b2624.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/detail/FunctionTraits.h>
#include <cmath>
#include <limits>
#define GPU_LAMBDA __device__ __host__
namespace {
constexpr int num_threads = C10_WARP_SIZE * 2;
constexpr int thread_work_size = 1;
constexpr int block_work_size = thread_work_size * num_threads;
template<typename index_t, typename func_t>
C10_LAUNCH_BOUNDS_1(num_threads)
__global__ void elementwise_kernel_with_index(index_t N, func_t f, typename function_traits<func_t>::result_type *data) {
#pragma unroll
for (int i = 0; i < thread_work_size; i++) {
index_t idx = block_work_size * blockIdx.x + num_threads * i + threadIdx.x;
if (idx < N) {
data[idx] = f(idx);
}
}
}
template<typename func_t>
void gpu_kernel_with_index(at::Tensor &output, func_t f) {
int64_t N = output.numel();
if (N == 0) {
return;
}
int64_t grid = (N + block_work_size - 1) / block_work_size;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
using scalar_t = typename function_traits<func_t>::result_type;
if (N <= std::numeric_limits<int>::max()) {
hipLaunchKernelGGL(( elementwise_kernel_with_index<int>), dim3(grid), dim3(num_threads), 0, stream, N, f, output.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( elementwise_kernel_with_index<int64_t>), dim3(grid), dim3(num_threads), 0, stream, N, f, output.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
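// Design note (added): the launcher dispatches on the element count so tensors with at
// most INT_MAX elements index with 32-bit arithmetic inside the kernel, which is cheaper
// on the GPU; larger tensors fall back to 64-bit indices.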
} // namespace
namespace at {
namespace native {
Tensor& linspace_cuda_out(const Scalar& start, const Scalar& end, c10::optional<int64_t> optional_steps, Tensor& result) {
const auto steps = optional_steps.value_or(100);
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (!optional_steps.has_value()) {
TORCH_WARN_ONCE(
"Not providing a value for linspace's steps is deprecated and will "
"throw a runtime error in a future release. This warning will appear "
"only once per process.");
}
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(start);
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
// Cast `end` and `start` to `float`, since range can be larger than scalar_t for integral types
float step = (static_cast<float>(scalar_end) - static_cast<float>(scalar_start)) / (steps - 1);
const int64_t halfway = steps / 2;
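// Note (added): the first half of the output is computed forward from `start` and the
// second half backward from `end`, so both endpoints are reproduced exactly and rounding
// error does not accumulate across the whole range.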
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
Tensor& logspace_cuda_out(const Scalar& start, const Scalar& end, c10::optional<int64_t> optional_steps, double base, Tensor& result) {
const auto steps = optional_steps.value_or(100);
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (!optional_steps.has_value()) {
TORCH_WARN_ONCE(
"Not providing a value for logspace's steps is deprecated and will "
"throw a runtime error in a future release. This warning will appear "
"only once per process.");
}
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
if (isComplexType(r.scalar_type())){
r.fill_(::pow(base, start.to<c10::complex<double>>()));
} else {
r.fill_(::pow(base, start.to<double>()));
}
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "logspace_cuda", [&]() {
float scalar_base = static_cast<float>(base); // Use float to avoid promotion to double
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
float step = static_cast<float>(scalar_end - scalar_start) / (steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return ::pow(scalar_base, scalar_start + step * ind);
}
return ::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "logspace_cuda", [&]() {
scalar_t scalar_base = static_cast<scalar_t>(base);
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return ::pow(scalar_base, scalar_start + step * ind);
}
return ::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
Tensor& range_cuda_out(Tensor& result, const Scalar& start, const Scalar& end, const Scalar& step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
if (result.numel() != size) {
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
Tensor& arange_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "arange_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
// we use double precision for (start - end) / step
// to compute size_d for consistency across devices.
// The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
// but double on cpu for the same,
// and the effective output size starts differing on CPU vs GPU because of precision issues, which
// we don't want.
// the corner-case we do want to take into account is int64_t, which has higher precision than double
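// Illustrative example (added): arange(0, 10, 3) gives size_d = ceil((10 - 0) / 3) = 4,
// i.e. the elements 0, 3, 6, 9.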
double size_d;
if (std::is_same<scalar_t, int64_t>::value) {
size_d = ::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
/ step.to<accscalar_t>());
} else {
size_d = ::ceil(static_cast<double>(end.to<double>() - start.to<double>())
/ step.to<double>());
}
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
int64_t numel = result.numel();
if (numel != size) {
if(numel > 0){
TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
" is ", numel, " which does not match the computed number of elements ", size,
". Note that this may occur as a result of rounding error. "
"The out tensor will be resized to a tensor of shape (", size, ",).");
}
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
}} // namespace at::native
| ed7626835ba32a6f9e77630c8175cf6a738b2624.cu | #include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/detail/FunctionTraits.h>
#include <cmath>
#include <limits>
#define GPU_LAMBDA __device__ __host__
namespace {
constexpr int num_threads = C10_WARP_SIZE * 2;
constexpr int thread_work_size = 1;
constexpr int block_work_size = thread_work_size * num_threads;
template<typename index_t, typename func_t>
C10_LAUNCH_BOUNDS_1(num_threads)
__global__ void elementwise_kernel_with_index(index_t N, func_t f, typename function_traits<func_t>::result_type *data) {
#pragma unroll
for (int i = 0; i < thread_work_size; i++) {
index_t idx = block_work_size * blockIdx.x + num_threads * i + threadIdx.x;
if (idx < N) {
data[idx] = f(idx);
}
}
}
template<typename func_t>
void gpu_kernel_with_index(at::Tensor &output, func_t f) {
int64_t N = output.numel();
if (N == 0) {
return;
}
int64_t grid = (N + block_work_size - 1) / block_work_size;
auto stream = at::cuda::getCurrentCUDAStream();
using scalar_t = typename function_traits<func_t>::result_type;
if (N <= std::numeric_limits<int>::max()) {
elementwise_kernel_with_index<int><<<grid, num_threads, 0, stream>>>(N, f, output.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
elementwise_kernel_with_index<int64_t><<<grid, num_threads, 0, stream>>>(N, f, output.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
} // namespace
namespace at {
namespace native {
Tensor& linspace_cuda_out(const Scalar& start, const Scalar& end, c10::optional<int64_t> optional_steps, Tensor& result) {
const auto steps = optional_steps.value_or(100);
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (!optional_steps.has_value()) {
TORCH_WARN_ONCE(
"Not providing a value for linspace's steps is deprecated and will "
"throw a runtime error in a future release. This warning will appear "
"only once per process.");
}
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(start);
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
// Cast `end` and `start` to `float`, since range can be larger than scalar_t for integral types
float step = (static_cast<float>(scalar_end) - static_cast<float>(scalar_start)) / (steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
Tensor& logspace_cuda_out(const Scalar& start, const Scalar& end, c10::optional<int64_t> optional_steps, double base, Tensor& result) {
const auto steps = optional_steps.value_or(100);
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (!optional_steps.has_value()) {
TORCH_WARN_ONCE(
"Not providing a value for logspace's steps is deprecated and will "
"throw a runtime error in a future release. This warning will appear "
"only once per process.");
}
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
if (isComplexType(r.scalar_type())){
r.fill_(std::pow(base, start.to<c10::complex<double>>()));
} else {
r.fill_(std::pow(base, start.to<double>()));
}
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "logspace_cuda", [&]() {
float scalar_base = static_cast<float>(base); // Use float to avoid promotion to double
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
float step = static_cast<float>(scalar_end - scalar_start) / (steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return std::pow(scalar_base, scalar_start + step * ind);
}
return std::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "logspace_cuda", [&]() {
scalar_t scalar_base = static_cast<scalar_t>(base);
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return std::pow(scalar_base, scalar_start + step * ind);
}
return std::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
Tensor& range_cuda_out(Tensor& result, const Scalar& start, const Scalar& end, const Scalar& step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
if (result.numel() != size) {
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
Tensor& arange_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "arange_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
// we use double precision for (start - end) / step
// to compute size_d for consistency across devices.
// The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
// but double on cpu for the same,
// and the effective output size starts differing on CPU vs GPU because of precision issues, which
    // we don't want.
// the corner-case we do want to take into account is int64_t, which has higher precision than double
double size_d;
if (std::is_same<scalar_t, int64_t>::value) {
size_d = std::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
/ step.to<accscalar_t>());
} else {
size_d = std::ceil(static_cast<double>(end.to<double>() - start.to<double>())
/ step.to<double>());
}
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
int64_t numel = result.numel();
if (numel != size) {
if(numel > 0){
TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
" is ", numel, " which does not match the computed number of elements ", size,
". Note that this may occur as a result of rounding error. "
"The out tensor will be resized to a tensor of shape (", size, ",).");
}
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
}} // namespace at::native
|
466416c089025fe21dfee02128fa0c3dec2ca6e7.hip | // !!! This is a file automatically generated by hipify!!!
/*
GFC code: A GPU-based compressor for arrays of double-precision
floating-point values.
Copyright (c) 2011-2020, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Molly A. O'Neil and Martin Burtscher
URL: The latest version of this code is available at
https://userweb.cs.txstate.edu/~burtscher/research/GFC/.
Publication: This work is described in detail in the following paper.
Molly A. O'Neil and Martin Burtscher. Floating-Point Data Compression at 75
Gb/s on a GPU. Proceedings of the Fourth Workshop on General Purpose Processing
Using GPUs, pp. 7:1-7:7. March 2011.
*/
#include "GFC_22.h"
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define ull unsigned long long
#define MAX (64*1024*1024)
#define WARPSIZE 32
__constant__ int dimensionalityd; // dimensionality parameter
__constant__ ull *cbufd; // ptr to uncompressed data
__constant__ unsigned char *dbufd; // ptr to compressed data
__constant__ ull *fbufd; // ptr to decompressed data
__constant__ int *cutd; // ptr to chunk boundaries
__constant__ int *offd; // ptr to chunk offsets after compression
/************************************************************************************/
/*
This is the GPU compression kernel, which should be launched using the block count
and warps/block:
hipLaunchKernelGGL(( CompressionKernel), dim3(blocks), dim3(WARPSIZE*warpsperblock), 0, 0, );
Inputs
------
dimensionalityd: dimensionality of trace (from cmd line)
cbufd: ptr to the uncompressed data
cutd: ptr to array of chunk boundaries
Output
------
The compressed data, in dbufd
Compressed chunk offsets for offset table, in offd
*/
__global__ void CompressionKernel()
{
register int offset, code, bcount, tmp, off, beg, end, lane, warp, iindex, lastidx, start, term;
register ull diff, prev;
__shared__ int ibufs[32 * (3 * WARPSIZE / 2)]; // shared space for prefix sum
// index within this warp
lane = threadIdx.x & 31;
// index within shared prefix sum array
iindex = threadIdx.x / WARPSIZE * (3 * WARPSIZE / 2) + lane;
ibufs[iindex] = 0;
iindex += WARPSIZE / 2;
lastidx = (threadIdx.x / WARPSIZE + 1) * (3 * WARPSIZE / 2) - 1;
// warp id
warp = (threadIdx.x + blockIdx.x * blockDim.x) / WARPSIZE;
// prediction index within previous subchunk
offset = WARPSIZE - (dimensionalityd - lane % dimensionalityd) - lane;
// determine start and end of chunk to compress
start = 0;
if (warp > 0) start = cutd[warp-1];
term = cutd[warp];
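  // ((start+1)/2*17) is the worst-case compressed offset of the preceding values:
  // every pair of doubles occupies at most 17 bytes (one shared header byte + 2 x 8 delta bytes)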
off = ((start+1)/2*17);
prev = 0;
for (int i = start + lane; i < term; i += WARPSIZE) {
// calculate delta between value to compress and prediction
// and negate if negative
diff = cbufd[i] - prev;
code = (diff >> 60) & 8;
if (code != 0) {
diff = -diff;
}
// count leading zeros in positive delta
bcount = 8 - (__clzll(diff) >> 3);
if (bcount == 2) bcount = 3; // encode 6 lead-zero bytes as 5
// prefix sum to determine start positions of non-zero delta bytes
ibufs[iindex] = bcount;
__threadfence_block();
ibufs[iindex] += ibufs[iindex-1];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-2];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-4];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-8];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-16];
__threadfence_block();
// write out non-zero bytes of delta to compressed buffer
beg = off + (WARPSIZE/2) + ibufs[iindex-1];
end = beg + bcount;
for (; beg < end; beg++) {
dbufd[beg] = diff;
diff >>= 8;
}
if (bcount >= 3) bcount--; // adjust byte count for the dropped encoding
tmp = ibufs[lastidx];
code |= bcount;
ibufs[iindex] = code;
__threadfence_block();
// write out half-bytes of sign and leading-zero-byte count (every other thread
// writes its half-byte and neighbor's half-byte)
if ((lane & 1) != 0) {
dbufd[off + (lane >> 1)] = ibufs[iindex-1] | (code << 4);
}
off += tmp + (WARPSIZE/2);
// save prediction value from this subchunk (based on provided dimensionality)
// for use in next subchunk
prev = cbufd[i + offset];
}
// save final value of off, which is total bytes of compressed output for this chunk
if (lane == 31) offd[warp] = off;
}
/************************************************************************************/
/*
This is the GPU decompression kernel, which should be launched using the block count
and warps/block:
  hipLaunchKernelGGL(( DecompressionKernel), dim3(blocks), dim3(WARPSIZE*warpsperblock), 0, 0, );
Inputs
------
dimensionalityd: dimensionality of trace
dbufd: ptr to array of compressed data
cutd: ptr to array of chunk boundaries
Output
------
The decompressed data in fbufd
*/
__global__ void DecompressionKernel()
{
register int offset, code, bcount, off, beg, end, lane, warp, iindex, lastidx, start, term;
register ull diff, prev;
__shared__ int ibufs[32 * (3 * WARPSIZE / 2)];
// index within this warp
lane = threadIdx.x & 31;
// index within shared prefix sum array
iindex = threadIdx.x / WARPSIZE * (3 * WARPSIZE / 2) + lane;
ibufs[iindex] = 0;
iindex += WARPSIZE / 2;
lastidx = (threadIdx.x / WARPSIZE + 1) * (3 * WARPSIZE / 2) - 1;
// warp id
warp = (threadIdx.x + blockIdx.x * blockDim.x) / WARPSIZE;
// prediction index within previous subchunk
offset = WARPSIZE - (dimensionalityd - lane % dimensionalityd) - lane;
// determine start and end of chunk to decompress
start = 0;
if (warp > 0) start = cutd[warp-1];
term = cutd[warp];
off = ((start+1)/2*17);
prev = 0;
for (int i = start + lane; i < term; i += WARPSIZE) {
// read in half-bytes of size and leading-zero count information
if ((lane & 1) == 0) {
code = dbufd[off + (lane >> 1)];
ibufs[iindex] = code;
ibufs[iindex + 1] = code >> 4;
}
off += (WARPSIZE/2);
__threadfence_block();
code = ibufs[iindex];
bcount = code & 7;
if (bcount >= 2) bcount++;
// calculate start positions of compressed data
ibufs[iindex] = bcount;
__threadfence_block();
ibufs[iindex] += ibufs[iindex-1];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-2];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-4];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-8];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-16];
__threadfence_block();
// read in compressed data (the non-zero bytes)
beg = off + ibufs[iindex-1];
off += ibufs[lastidx];
end = beg + bcount - 1;
diff = 0;
for (; beg <= end; end--) {
diff <<= 8;
diff |= dbufd[end];
}
// negate delta if sign bit indicates it was negated during compression
if ((code & 8) != 0) {
diff = -diff;
}
// write out the uncompressed word
fbufd[i] = prev + diff;
__threadfence_block();
// save prediction for next subchunk
prev = fbufd[i + offset];
}
}
/************************************************************************************/
static void CudaTest(const char *msg)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
abort();
}
}
// Burtscher's implementation has been slightly modified to read from / write to memory instead of files.
static size_t stream_read(void *buffer, size_t size, size_t items, const void *stream, size_t bytes, size_t *cursor) {
assert(*cursor <= bytes);
size_t remaining_items = (bytes - *cursor) / size;
size_t read = items < remaining_items ? items : remaining_items;
memcpy(buffer, (const char*) stream + *cursor, read * size);
*cursor += read * size;
return read;
}
static size_t stream_write(const void *buffer, size_t size, size_t items, void *stream, size_t *cursor) {
memcpy((char*) stream + *cursor, buffer, size * items);
*cursor += size * items;
return items;
}
/************************************************************************************/
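// Compresses the doubles in in_stream (in_size bytes) into out_stream with the given
// block/warps-per-block configuration and dimensionality; optionally reports the kernel
// runtime in *kernel_time_us and returns the number of bytes written to out_stream.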
size_t GFC_Compress_Memory(const void *in_stream, size_t in_size, void *out_stream, int blocks,
int warpsperblock, int dimensionality, uint64_t *kernel_time_us)
{
size_t in_cursor = 0;
size_t out_cursor = 0;
hipGetLastError(); // reset error value
// allocate CPU buffers
ull *cbuf = (ull *)malloc(sizeof(ull) * MAX); // uncompressed data
if (cbuf == NULL) {
fprintf(stderr, "cannot allocate cbuf\n"); abort();
}
char *dbuf = (char *)malloc(sizeof(char) * ((MAX+1)/2*17)); // compressed data
if (dbuf == NULL) {
fprintf(stderr, "cannot allocate dbuf\n"); abort();
}
int *cut = (int *)malloc(sizeof(int) * blocks * warpsperblock); // chunk boundaries
if (cut == NULL) {
fprintf(stderr, "cannot allocate cut\n"); abort();
}
int *off = (int *)malloc(sizeof(int) * blocks * warpsperblock); // offset table
if (off == NULL) {
fprintf(stderr, "cannot allocate off\n"); abort();
}
// read in trace to cbuf
int doubles = stream_read(cbuf, 8, MAX, in_stream, in_size, &in_cursor);
// calculate required padding for last chunk
int padding = ((doubles + WARPSIZE - 1) & -WARPSIZE) - doubles;
doubles += padding;
// determine chunk assignments per warp
int per = (doubles + blocks * warpsperblock - 1) / (blocks * warpsperblock);
if (per < WARPSIZE) per = WARPSIZE;
per = (per + WARPSIZE - 1) & -WARPSIZE;
int curr = 0, before = 0, d = 0;
for (int i = 0; i < blocks * warpsperblock; i++) {
curr += per;
cut[i] = min(curr, doubles);
if (cut[i] - before > 0) {
d = cut[i] - before;
}
before = cut[i];
}
// set the pad values to ensure correct prediction
if (d <= WARPSIZE) {
for (int i = doubles - padding; i < doubles; i++) {
cbuf[i] = 0;
}
} else {
for (int i = doubles - padding; i < doubles; i++) {
cbuf[i] = cbuf[(i & -WARPSIZE) - (dimensionality - i % dimensionality)];
}
}
// allocate GPU buffers
ull *cbufl; // uncompressed data
char *dbufl; // compressed data
int *cutl; // chunk boundaries
int *offl; // offset table
if (hipSuccess != hipMalloc((void **)&cbufl, sizeof(ull) * doubles))
fprintf(stderr, "could not allocate cbufd\n");
CudaTest("couldn't allocate cbufd");
if (hipSuccess != hipMalloc((void **)&dbufl, sizeof(char) * ((doubles+1)/2*17)))
fprintf(stderr, "could not allocate dbufd\n");
CudaTest("couldn't allocate dbufd");
if (hipSuccess != hipMalloc((void **)&cutl, sizeof(int) * blocks * warpsperblock))
fprintf(stderr, "could not allocate cutd\n");
CudaTest("couldn't allocate cutd");
if (hipSuccess != hipMalloc((void **)&offl, sizeof(int) * blocks * warpsperblock))
fprintf(stderr, "could not allocate offd\n");
CudaTest("couldn't allocate offd");
// copy buffer starting addresses (pointers) and values to constant memory
if (hipSuccess != hipMemcpyToSymbol(dimensionalityd, &dimensionality, sizeof(int)))
fprintf(stderr, "copying of dimensionality to device failed\n");
CudaTest("dimensionality copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(cbufd, &cbufl, sizeof(void *)))
fprintf(stderr, "copying of cbufl to device failed\n");
CudaTest("cbufl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(dbufd, &dbufl, sizeof(void *)))
fprintf(stderr, "copying of dbufl to device failed\n");
CudaTest("dbufl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(cutd, &cutl, sizeof(void *)))
fprintf(stderr, "copying of cutl to device failed\n");
CudaTest("cutl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(offd, &offl, sizeof(void *)))
fprintf(stderr, "copying of offl to device failed\n");
CudaTest("offl copy to device failed");
// copy CPU buffer contents to GPU
if (hipSuccess != hipMemcpy(cbufl, cbuf, sizeof(ull) * doubles, hipMemcpyHostToDevice))
fprintf(stderr, "copying of cbuf to device failed\n");
CudaTest("cbuf copy to device failed");
if (hipSuccess != hipMemcpy(cutl, cut, sizeof(int) * blocks * warpsperblock, hipMemcpyHostToDevice))
fprintf(stderr, "copying of cut to device failed\n");
CudaTest("cut copy to device failed");
hipEvent_t begin, end;
if (kernel_time_us) {
hipEventCreate(&begin);
hipEventCreate(&end);
hipEventRecord(begin, NULL);
}
hipLaunchKernelGGL(( CompressionKernel), dim3(blocks), dim3(WARPSIZE*warpsperblock), 0, 0, );
CudaTest("compression kernel launch failed");
if (kernel_time_us) {
hipEventRecord(end, NULL);
float duration_ms;
hipEventSynchronize(end);
hipEventElapsedTime(&duration_ms, begin, end);
*kernel_time_us = (uint64_t) (duration_ms * 1000);
hipEventDestroy(end);
hipEventDestroy(begin);
}
// transfer offsets back to CPU
if(hipSuccess != hipMemcpy(off, offl, sizeof(int) * blocks * warpsperblock, hipMemcpyDeviceToHost))
fprintf(stderr, "copying of off from device failed\n");
CudaTest("off copy from device failed");
// output header
int num;
int doublecnt = doubles-padding;
num = stream_write(&blocks, 1, 1, out_stream, &out_cursor);
assert(1 == num);
num = stream_write(&warpsperblock, 1, 1, out_stream, &out_cursor);
assert(1 == num);
num = stream_write(&dimensionality, 1, 1, out_stream, &out_cursor);
assert(1 == num);
num = stream_write(&doublecnt, 4, 1, out_stream, &out_cursor);
assert(1 == num);
(void) num; // silence unused warning
// output offset table
for(int i = 0; i < blocks * warpsperblock; i++) {
int start = 0;
if(i > 0) start = cut[i-1];
off[i] -= ((start+1)/2*17);
num = stream_write(&off[i], 4, 1, out_stream, &out_cursor); // chunk's compressed size in bytes
assert(1 == num);
}
// output compressed data by chunk
for(int i = 0; i < blocks * warpsperblock; i++) {
int offset, start = 0;
if(i > 0) start = cut[i-1];
offset = ((start+1)/2*17);
// transfer compressed data back to CPU by chunk
if (hipSuccess != hipMemcpy(dbuf + offset, dbufl + offset, sizeof(char) * off[i], hipMemcpyDeviceToHost))
fprintf(stderr, "copying of dbuf from device failed\n");
CudaTest("dbuf copy from device failed");
num = stream_write(&dbuf[offset], 1, off[i], out_stream, &out_cursor);
assert(off[i] == num);
}
(void) num; // silence unused warning
free(cbuf);
free(dbuf);
free(cut);
free(off);
if (hipSuccess != hipFree(cbufl))
fprintf(stderr, "could not deallocate cbufd\n");
CudaTest("couldn't deallocate cbufd");
if (hipSuccess != hipFree(dbufl))
fprintf(stderr, "could not deallocate dbufd\n");
CudaTest("couldn't deallocate dbufd");
if (hipSuccess != hipFree(cutl))
fprintf(stderr, "could not deallocate cutd\n");
CudaTest("couldn't deallocate cutd");
if (hipSuccess != hipFree(offl))
fprintf(stderr, "could not deallocate offd\n");
CudaTest("couldn't deallocate offd");
return out_cursor;
}
/************************************************************************************/
size_t GFC_Decompress_Memory(const void *in_stream, size_t in_size, void *out_stream, uint64_t *kernel_time_us)
{
size_t in_cursor = 0;
size_t out_cursor = 0;
hipGetLastError(); // reset error value
int num, doubles;
int blocks, warpsperblock, dimensionality;
num = stream_read(&blocks, 1, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
blocks &= 255;
num = stream_read(&warpsperblock, 1, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
warpsperblock &= 255;
num = stream_read(&dimensionality, 1, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
dimensionality &= 255;
num = stream_read(&doubles, 4, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
(void) num; // silence unused warning
// allocate CPU buffers
char *dbuf = (char *)malloc(sizeof(char) * ((MAX+1)/2*17)); // compressed data, divided by chunk
if (dbuf == NULL) {
fprintf(stderr, "cannot allocate dbuf\n"); exit(-1);
}
ull *fbuf = (ull *)malloc(sizeof(ull) * MAX); // decompressed data
if (fbuf == NULL) {
fprintf(stderr, "cannot allocate fbuf\n"); exit(-1);
}
int *cut = (int *)malloc(sizeof(int) * blocks * warpsperblock); // chunk boundaries
if (cut == NULL) {
fprintf(stderr, "cannot allocate cut\n"); exit(-1);
}
int *off = (int *)malloc(sizeof(int) * blocks * warpsperblock); // offset table
if(off == NULL) {
fprintf(stderr, "cannot allocate off\n"); exit(-1);
}
// read in offset table
for(int i = 0; i < blocks * warpsperblock; i++) {
int num = stream_read(&off[i], 4, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
}
// calculate required padding for last chunk
int padding = ((doubles + WARPSIZE - 1) & -WARPSIZE) - doubles;
doubles += padding;
// determine chunk assignments per warp
int per = (doubles + blocks * warpsperblock - 1) / (blocks * warpsperblock);
if (per < WARPSIZE) per = WARPSIZE;
per = (per + WARPSIZE - 1) & -WARPSIZE;
int curr = 0;
for (int i = 0; i < blocks * warpsperblock; i++) {
curr += per;
cut[i] = min(curr, doubles);
}
// allocate GPU buffers
char *dbufl; // compressed data
ull *fbufl; // uncompressed data
int *cutl; // chunk boundaries
if (hipSuccess != hipMalloc((void **)&dbufl, sizeof(char) * ((doubles+1)/2*17)))
fprintf(stderr, "could not allocate dbufd\n");
CudaTest("couldn't allocate dbufd");
if (hipSuccess != hipMalloc((void **)&fbufl, sizeof(ull) * doubles))
fprintf(stderr, "could not allocate fbufd\n");
CudaTest("couldn't allocate fbufd");
if (hipSuccess != hipMalloc((void **)&cutl, sizeof(int) * blocks * warpsperblock))
fprintf(stderr, "could not allocate cutd\n");
CudaTest("couldn't allocate cutd");
// copy buffer starting addresses (pointers) and values to constant memory
if (hipSuccess != hipMemcpyToSymbol(dimensionalityd, &dimensionality, sizeof(int)))
fprintf(stderr, "copying of dimensionality to device failed\n");
CudaTest("dimensionality copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(dbufd, &dbufl, sizeof(void *)))
fprintf(stderr, "copying of dbufl to device failed\n");
CudaTest("dbufl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(fbufd, &fbufl, sizeof(void *)))
fprintf(stderr, "copying of fbufl to device failed\n");
CudaTest("fbufl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(cutd, &cutl, sizeof(void *)))
fprintf(stderr, "copying of cutl to device failed\n");
CudaTest("cutl copy to device failed");
// read in input data and divide into chunks
for(int i = 0; i < blocks * warpsperblock; i++) {
int num, chbeg, start = 0;
if (i > 0) start = cut[i-1];
chbeg = ((start+1)/2*17);
// read in this chunk of data (based on offsets)
num = stream_read(&dbuf[chbeg], 1, off[i], in_stream, in_size, &in_cursor);
assert(off[i] == num);
(void) num; // silence unused warning
// transfer the chunk to the GPU
if (hipSuccess != hipMemcpy(dbufl + chbeg, dbuf + chbeg, sizeof(char) * off[i], hipMemcpyHostToDevice))
fprintf(stderr, "copying of dbuf to device failed\n");
CudaTest("dbuf copy to device failed");
}
// copy CPU cut buffer contents to GPU
if (hipSuccess != hipMemcpy(cutl, cut, sizeof(int) * blocks * warpsperblock, hipMemcpyHostToDevice))
fprintf(stderr, "copying of cut to device failed\n");
CudaTest("cut copy to device failed");
hipEvent_t begin, end;
if (kernel_time_us) {
hipEventCreate(&begin);
hipEventCreate(&end);
hipEventRecord(begin, NULL);
}
hipLaunchKernelGGL(( DecompressionKernel), dim3(blocks), dim3(WARPSIZE*warpsperblock), 0, 0, );
CudaTest("decompression kernel launch failed");
if (kernel_time_us) {
hipEventRecord(end, NULL);
float duration_ms;
hipEventSynchronize(end);
hipEventElapsedTime(&duration_ms, begin, end);
*kernel_time_us = (uint64_t) (duration_ms * 1000);
hipEventDestroy(end);
hipEventDestroy(begin);
}
// transfer result back to CPU
if (hipSuccess != hipMemcpy(fbuf, fbufl, sizeof(ull) * doubles, hipMemcpyDeviceToHost))
fprintf(stderr, "copying of fbuf from device failed\n");
CudaTest("fbuf copy from device failed");
// output decompressed data
num = stream_write(fbuf, 8, doubles-padding, out_stream, &out_cursor);
assert(num == doubles-padding);
(void) num; // silence unused warning
free(dbuf);
free(fbuf);
free(cut);
if(hipSuccess != hipFree(dbufl))
fprintf(stderr, "could not deallocate dbufd\n");
CudaTest("couldn't deallocate dbufd");
if(hipSuccess != hipFree(cutl))
fprintf(stderr, "could not deallocate cutd\n");
CudaTest("couldn't deallocate cutd");
return in_cursor;
}
/************************************************************************************/
static int VerifySystemParameters()
{
assert(1 == sizeof(char));
assert(4 == sizeof(int));
assert(8 == sizeof(ull));
#ifndef NDEBUG
int val = 1;
assert(1 == *((char *)&val));
#endif
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
int arch_cores_sm[3] = { 1, 8, 32 };
hipDeviceProp_t deviceProp;
hipGetDeviceCount(&device_count);
if (device_count == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
// Find the best major SM Architecture GPU device
for (current_device = 0; current_device < device_count; current_device++) {
hipGetDeviceProperties(&deviceProp, current_device);
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = max(best_SM_arch, deviceProp.major);
}
}
// Find the best CUDA capable GPU device
for (current_device = 0; current_device < device_count; current_device++) {
hipGetDeviceProperties(&deviceProp, current_device);
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
}
else if (deviceProp.major <= 2) {
sm_per_multiproc = arch_cores_sm[deviceProp.major];
}
else { // Device has SM major > 2
sm_per_multiproc = arch_cores_sm[2];
}
int compute_perf = deviceProp.multiProcessorCount *
sm_per_multiproc * deviceProp.clockRate;
if (compute_perf > max_compute_perf) {
// If we find GPU of SM major > 2, search only these
if (best_SM_arch > 2) {
// If device==best_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
}
hipGetDeviceProperties(&deviceProp, max_perf_device);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
  if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
return max_perf_device;
}
/************************************************************************************/
void GFC_Init()
{
int device;
device = VerifySystemParameters();
hipSetDevice(device);
hipFuncSetCacheConfig(CompressionKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(DecompressionKernel, hipFuncCachePreferL1);
}
const char *GFC_Version_String = "GPU FP Compressor v2.2";
| 466416c089025fe21dfee02128fa0c3dec2ca6e7.cu | /*
GFC code: A GPU-based compressor for arrays of double-precision
floating-point values.
Copyright (c) 2011-2020, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Molly A. O'Neil and Martin Burtscher
URL: The latest version of this code is available at
https://userweb.cs.txstate.edu/~burtscher/research/GFC/.
Publication: This work is described in detail in the following paper.
Molly A. O'Neil and Martin Burtscher. Floating-Point Data Compression at 75
Gb/s on a GPU. Proceedings of the Fourth Workshop on General Purpose Processing
Using GPUs, pp. 7:1-7:7. March 2011.
*/
#include "GFC_22.h"
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define ull unsigned long long
#define MAX (64*1024*1024)
#define WARPSIZE 32
__constant__ int dimensionalityd; // dimensionality parameter
__constant__ ull *cbufd; // ptr to uncompressed data
__constant__ unsigned char *dbufd; // ptr to compressed data
__constant__ ull *fbufd; // ptr to decompressed data
__constant__ int *cutd; // ptr to chunk boundaries
__constant__ int *offd; // ptr to chunk offsets after compression
/************************************************************************************/
/*
This is the GPU compression kernel, which should be launched using the block count
and warps/block:
CompressionKernel<<<blocks, WARPSIZE*warpsperblock>>>();
Inputs
------
dimensionalityd: dimensionality of trace (from cmd line)
cbufd: ptr to the uncompressed data
cutd: ptr to array of chunk boundaries
Output
------
The compressed data, in dbufd
Compressed chunk offsets for offset table, in offd
*/
__global__ void CompressionKernel()
{
register int offset, code, bcount, tmp, off, beg, end, lane, warp, iindex, lastidx, start, term;
register ull diff, prev;
__shared__ int ibufs[32 * (3 * WARPSIZE / 2)]; // shared space for prefix sum
// index within this warp
lane = threadIdx.x & 31;
// index within shared prefix sum array
iindex = threadIdx.x / WARPSIZE * (3 * WARPSIZE / 2) + lane;
ibufs[iindex] = 0;
iindex += WARPSIZE / 2;
lastidx = (threadIdx.x / WARPSIZE + 1) * (3 * WARPSIZE / 2) - 1;
// warp id
warp = (threadIdx.x + blockIdx.x * blockDim.x) / WARPSIZE;
// prediction index within previous subchunk
offset = WARPSIZE - (dimensionalityd - lane % dimensionalityd) - lane;
// determine start and end of chunk to compress
start = 0;
if (warp > 0) start = cutd[warp-1];
term = cutd[warp];
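  // ((start+1)/2*17) is the worst-case compressed offset of the preceding values:
  // every pair of doubles occupies at most 17 bytes (one shared header byte + 2 x 8 delta bytes)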
off = ((start+1)/2*17);
prev = 0;
for (int i = start + lane; i < term; i += WARPSIZE) {
// calculate delta between value to compress and prediction
// and negate if negative
diff = cbufd[i] - prev;
code = (diff >> 60) & 8;
if (code != 0) {
diff = -diff;
}
// count leading zeros in positive delta
bcount = 8 - (__clzll(diff) >> 3);
if (bcount == 2) bcount = 3; // encode 6 lead-zero bytes as 5
// prefix sum to determine start positions of non-zero delta bytes
ibufs[iindex] = bcount;
__threadfence_block();
ibufs[iindex] += ibufs[iindex-1];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-2];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-4];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-8];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-16];
__threadfence_block();
// write out non-zero bytes of delta to compressed buffer
beg = off + (WARPSIZE/2) + ibufs[iindex-1];
end = beg + bcount;
for (; beg < end; beg++) {
dbufd[beg] = diff;
diff >>= 8;
}
if (bcount >= 3) bcount--; // adjust byte count for the dropped encoding
tmp = ibufs[lastidx];
code |= bcount;
ibufs[iindex] = code;
__threadfence_block();
// write out half-bytes of sign and leading-zero-byte count (every other thread
// writes its half-byte and neighbor's half-byte)
if ((lane & 1) != 0) {
dbufd[off + (lane >> 1)] = ibufs[iindex-1] | (code << 4);
}
off += tmp + (WARPSIZE/2);
// save prediction value from this subchunk (based on provided dimensionality)
// for use in next subchunk
prev = cbufd[i + offset];
}
// save final value of off, which is total bytes of compressed output for this chunk
if (lane == 31) offd[warp] = off;
}
/************************************************************************************/
/*
This is the GPU decompression kernel, which should be launched using the block count
and warps/block:
  DecompressionKernel<<<blocks, WARPSIZE*warpsperblock>>>();
Inputs
------
dimensionalityd: dimensionality of trace
dbufd: ptr to array of compressed data
cutd: ptr to array of chunk boundaries
Output
------
The decompressed data in fbufd
*/
__global__ void DecompressionKernel()
{
register int offset, code, bcount, off, beg, end, lane, warp, iindex, lastidx, start, term;
register ull diff, prev;
__shared__ int ibufs[32 * (3 * WARPSIZE / 2)];
// index within this warp
lane = threadIdx.x & 31;
// index within shared prefix sum array
iindex = threadIdx.x / WARPSIZE * (3 * WARPSIZE / 2) + lane;
ibufs[iindex] = 0;
iindex += WARPSIZE / 2;
lastidx = (threadIdx.x / WARPSIZE + 1) * (3 * WARPSIZE / 2) - 1;
// warp id
warp = (threadIdx.x + blockIdx.x * blockDim.x) / WARPSIZE;
// prediction index within previous subchunk
offset = WARPSIZE - (dimensionalityd - lane % dimensionalityd) - lane;
// determine start and end of chunk to decompress
start = 0;
if (warp > 0) start = cutd[warp-1];
term = cutd[warp];
off = ((start+1)/2*17);
prev = 0;
for (int i = start + lane; i < term; i += WARPSIZE) {
// read in half-bytes of size and leading-zero count information
if ((lane & 1) == 0) {
code = dbufd[off + (lane >> 1)];
ibufs[iindex] = code;
ibufs[iindex + 1] = code >> 4;
}
off += (WARPSIZE/2);
__threadfence_block();
code = ibufs[iindex];
bcount = code & 7;
if (bcount >= 2) bcount++;
// calculate start positions of compressed data
ibufs[iindex] = bcount;
__threadfence_block();
ibufs[iindex] += ibufs[iindex-1];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-2];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-4];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-8];
__threadfence_block();
ibufs[iindex] += ibufs[iindex-16];
__threadfence_block();
// read in compressed data (the non-zero bytes)
beg = off + ibufs[iindex-1];
off += ibufs[lastidx];
end = beg + bcount - 1;
diff = 0;
for (; beg <= end; end--) {
diff <<= 8;
diff |= dbufd[end];
}
// negate delta if sign bit indicates it was negated during compression
if ((code & 8) != 0) {
diff = -diff;
}
// write out the uncompressed word
fbufd[i] = prev + diff;
__threadfence_block();
// save prediction for next subchunk
prev = fbufd[i + offset];
}
}
/************************************************************************************/
static void CudaTest(const char *msg)
{
cudaError_t e;
cudaThreadSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
abort();
}
}
// Burtscher's implementation has been slightly modified to read from / write to memory instead of files.
static size_t stream_read(void *buffer, size_t size, size_t items, const void *stream, size_t bytes, size_t *cursor) {
assert(*cursor <= bytes);
size_t remaining_items = (bytes - *cursor) / size;
size_t read = items < remaining_items ? items : remaining_items;
memcpy(buffer, (const char*) stream + *cursor, read * size);
*cursor += read * size;
return read;
}
static size_t stream_write(const void *buffer, size_t size, size_t items, void *stream, size_t *cursor) {
memcpy((char*) stream + *cursor, buffer, size * items);
*cursor += size * items;
return items;
}
/************************************************************************************/
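// Compresses the doubles in in_stream (in_size bytes) into out_stream with the given
// block/warps-per-block configuration and dimensionality; optionally reports the kernel
// runtime in *kernel_time_us and returns the number of bytes written to out_stream.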
size_t GFC_Compress_Memory(const void *in_stream, size_t in_size, void *out_stream, int blocks,
int warpsperblock, int dimensionality, uint64_t *kernel_time_us)
{
size_t in_cursor = 0;
size_t out_cursor = 0;
cudaGetLastError(); // reset error value
// allocate CPU buffers
ull *cbuf = (ull *)malloc(sizeof(ull) * MAX); // uncompressed data
if (cbuf == NULL) {
fprintf(stderr, "cannot allocate cbuf\n"); abort();
}
char *dbuf = (char *)malloc(sizeof(char) * ((MAX+1)/2*17)); // compressed data
if (dbuf == NULL) {
fprintf(stderr, "cannot allocate dbuf\n"); abort();
}
int *cut = (int *)malloc(sizeof(int) * blocks * warpsperblock); // chunk boundaries
if (cut == NULL) {
fprintf(stderr, "cannot allocate cut\n"); abort();
}
int *off = (int *)malloc(sizeof(int) * blocks * warpsperblock); // offset table
if (off == NULL) {
fprintf(stderr, "cannot allocate off\n"); abort();
}
// read in trace to cbuf
int doubles = stream_read(cbuf, 8, MAX, in_stream, in_size, &in_cursor);
// calculate required padding for last chunk
int padding = ((doubles + WARPSIZE - 1) & -WARPSIZE) - doubles;
doubles += padding;
// determine chunk assignments per warp
int per = (doubles + blocks * warpsperblock - 1) / (blocks * warpsperblock);
if (per < WARPSIZE) per = WARPSIZE;
per = (per + WARPSIZE - 1) & -WARPSIZE;
int curr = 0, before = 0, d = 0;
for (int i = 0; i < blocks * warpsperblock; i++) {
curr += per;
cut[i] = min(curr, doubles);
if (cut[i] - before > 0) {
d = cut[i] - before;
}
before = cut[i];
}
// set the pad values to ensure correct prediction
if (d <= WARPSIZE) {
for (int i = doubles - padding; i < doubles; i++) {
cbuf[i] = 0;
}
} else {
for (int i = doubles - padding; i < doubles; i++) {
cbuf[i] = cbuf[(i & -WARPSIZE) - (dimensionality - i % dimensionality)];
}
}
// allocate GPU buffers
ull *cbufl; // uncompressed data
char *dbufl; // compressed data
int *cutl; // chunk boundaries
int *offl; // offset table
if (cudaSuccess != cudaMalloc((void **)&cbufl, sizeof(ull) * doubles))
fprintf(stderr, "could not allocate cbufd\n");
CudaTest("couldn't allocate cbufd");
if (cudaSuccess != cudaMalloc((void **)&dbufl, sizeof(char) * ((doubles+1)/2*17)))
fprintf(stderr, "could not allocate dbufd\n");
CudaTest("couldn't allocate dbufd");
if (cudaSuccess != cudaMalloc((void **)&cutl, sizeof(int) * blocks * warpsperblock))
fprintf(stderr, "could not allocate cutd\n");
CudaTest("couldn't allocate cutd");
if (cudaSuccess != cudaMalloc((void **)&offl, sizeof(int) * blocks * warpsperblock))
fprintf(stderr, "could not allocate offd\n");
CudaTest("couldn't allocate offd");
// copy buffer starting addresses (pointers) and values to constant memory
if (cudaSuccess != cudaMemcpyToSymbol(dimensionalityd, &dimensionality, sizeof(int)))
fprintf(stderr, "copying of dimensionality to device failed\n");
CudaTest("dimensionality copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(cbufd, &cbufl, sizeof(void *)))
fprintf(stderr, "copying of cbufl to device failed\n");
CudaTest("cbufl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(dbufd, &dbufl, sizeof(void *)))
fprintf(stderr, "copying of dbufl to device failed\n");
CudaTest("dbufl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(cutd, &cutl, sizeof(void *)))
fprintf(stderr, "copying of cutl to device failed\n");
CudaTest("cutl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(offd, &offl, sizeof(void *)))
fprintf(stderr, "copying of offl to device failed\n");
CudaTest("offl copy to device failed");
// copy CPU buffer contents to GPU
if (cudaSuccess != cudaMemcpy(cbufl, cbuf, sizeof(ull) * doubles, cudaMemcpyHostToDevice))
fprintf(stderr, "copying of cbuf to device failed\n");
CudaTest("cbuf copy to device failed");
if (cudaSuccess != cudaMemcpy(cutl, cut, sizeof(int) * blocks * warpsperblock, cudaMemcpyHostToDevice))
fprintf(stderr, "copying of cut to device failed\n");
CudaTest("cut copy to device failed");
cudaEvent_t begin, end;
if (kernel_time_us) {
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaEventRecord(begin, NULL);
}
CompressionKernel<<<blocks, WARPSIZE*warpsperblock>>>();
CudaTest("compression kernel launch failed");
if (kernel_time_us) {
cudaEventRecord(end, NULL);
float duration_ms;
cudaEventSynchronize(end);
cudaEventElapsedTime(&duration_ms, begin, end);
*kernel_time_us = (uint64_t) (duration_ms * 1000);
cudaEventDestroy(end);
cudaEventDestroy(begin);
}
// transfer offsets back to CPU
if(cudaSuccess != cudaMemcpy(off, offl, sizeof(int) * blocks * warpsperblock, cudaMemcpyDeviceToHost))
fprintf(stderr, "copying of off from device failed\n");
CudaTest("off copy from device failed");
// output header
int num;
int doublecnt = doubles-padding;
num = stream_write(&blocks, 1, 1, out_stream, &out_cursor);
assert(1 == num);
num = stream_write(&warpsperblock, 1, 1, out_stream, &out_cursor);
assert(1 == num);
num = stream_write(&dimensionality, 1, 1, out_stream, &out_cursor);
assert(1 == num);
num = stream_write(&doublecnt, 4, 1, out_stream, &out_cursor);
assert(1 == num);
(void) num; // silence unused warning
// output offset table
for(int i = 0; i < blocks * warpsperblock; i++) {
int start = 0;
if(i > 0) start = cut[i-1];
off[i] -= ((start+1)/2*17);
num = stream_write(&off[i], 4, 1, out_stream, &out_cursor); // chunk's compressed size in bytes
assert(1 == num);
}
// output compressed data by chunk
for(int i = 0; i < blocks * warpsperblock; i++) {
int offset, start = 0;
if(i > 0) start = cut[i-1];
offset = ((start+1)/2*17);
// transfer compressed data back to CPU by chunk
if (cudaSuccess != cudaMemcpy(dbuf + offset, dbufl + offset, sizeof(char) * off[i], cudaMemcpyDeviceToHost))
fprintf(stderr, "copying of dbuf from device failed\n");
CudaTest("dbuf copy from device failed");
num = stream_write(&dbuf[offset], 1, off[i], out_stream, &out_cursor);
assert(off[i] == num);
}
(void) num; // silence unused warning
free(cbuf);
free(dbuf);
free(cut);
free(off);
if (cudaSuccess != cudaFree(cbufl))
fprintf(stderr, "could not deallocate cbufd\n");
CudaTest("couldn't deallocate cbufd");
if (cudaSuccess != cudaFree(dbufl))
fprintf(stderr, "could not deallocate dbufd\n");
CudaTest("couldn't deallocate dbufd");
if (cudaSuccess != cudaFree(cutl))
fprintf(stderr, "could not deallocate cutd\n");
CudaTest("couldn't deallocate cutd");
if (cudaSuccess != cudaFree(offl))
fprintf(stderr, "could not deallocate offd\n");
CudaTest("couldn't deallocate offd");
return out_cursor;
}
/************************************************************************************/
size_t GFC_Decompress_Memory(const void *in_stream, size_t in_size, void *out_stream, uint64_t *kernel_time_us)
{
size_t in_cursor = 0;
size_t out_cursor = 0;
cudaGetLastError(); // reset error value
int num, doubles;
int blocks, warpsperblock, dimensionality;
num = stream_read(&blocks, 1, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
blocks &= 255;
num = stream_read(&warpsperblock, 1, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
warpsperblock &= 255;
num = stream_read(&dimensionality, 1, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
dimensionality &= 255;
num = stream_read(&doubles, 4, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
(void) num; // silence unused warning
// allocate CPU buffers
char *dbuf = (char *)malloc(sizeof(char) * ((MAX+1)/2*17)); // compressed data, divided by chunk
if (dbuf == NULL) {
fprintf(stderr, "cannot allocate dbuf\n"); exit(-1);
}
ull *fbuf = (ull *)malloc(sizeof(ull) * MAX); // decompressed data
if (fbuf == NULL) {
fprintf(stderr, "cannot allocate fbuf\n"); exit(-1);
}
int *cut = (int *)malloc(sizeof(int) * blocks * warpsperblock); // chunk boundaries
if (cut == NULL) {
fprintf(stderr, "cannot allocate cut\n"); exit(-1);
}
int *off = (int *)malloc(sizeof(int) * blocks * warpsperblock); // offset table
if(off == NULL) {
fprintf(stderr, "cannot allocate off\n"); exit(-1);
}
// read in offset table
for(int i = 0; i < blocks * warpsperblock; i++) {
int num = stream_read(&off[i], 4, 1, in_stream, in_size, &in_cursor);
assert(1 == num);
}
// calculate required padding for last chunk
int padding = ((doubles + WARPSIZE - 1) & -WARPSIZE) - doubles;
doubles += padding;
// determine chunk assignments per warp
int per = (doubles + blocks * warpsperblock - 1) / (blocks * warpsperblock);
if (per < WARPSIZE) per = WARPSIZE;
per = (per + WARPSIZE - 1) & -WARPSIZE;
int curr = 0;
for (int i = 0; i < blocks * warpsperblock; i++) {
curr += per;
cut[i] = min(curr, doubles);
}
// allocate GPU buffers
char *dbufl; // compressed data
ull *fbufl; // uncompressed data
int *cutl; // chunk boundaries
if (cudaSuccess != cudaMalloc((void **)&dbufl, sizeof(char) * ((doubles+1)/2*17)))
fprintf(stderr, "could not allocate dbufd\n");
CudaTest("couldn't allocate dbufd");
if (cudaSuccess != cudaMalloc((void **)&fbufl, sizeof(ull) * doubles))
fprintf(stderr, "could not allocate fbufd\n");
CudaTest("couldn't allocate fbufd");
if (cudaSuccess != cudaMalloc((void **)&cutl, sizeof(int) * blocks * warpsperblock))
fprintf(stderr, "could not allocate cutd\n");
CudaTest("couldn't allocate cutd");
// copy buffer starting addresses (pointers) and values to constant memory
if (cudaSuccess != cudaMemcpyToSymbol(dimensionalityd, &dimensionality, sizeof(int)))
fprintf(stderr, "copying of dimensionality to device failed\n");
CudaTest("dimensionality copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(dbufd, &dbufl, sizeof(void *)))
fprintf(stderr, "copying of dbufl to device failed\n");
CudaTest("dbufl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(fbufd, &fbufl, sizeof(void *)))
fprintf(stderr, "copying of fbufl to device failed\n");
CudaTest("fbufl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(cutd, &cutl, sizeof(void *)))
fprintf(stderr, "copying of cutl to device failed\n");
CudaTest("cutl copy to device failed");
// read in input data and divide into chunks
for(int i = 0; i < blocks * warpsperblock; i++) {
int num, chbeg, start = 0;
if (i > 0) start = cut[i-1];
chbeg = ((start+1)/2*17);
// read in this chunk of data (based on offsets)
num = stream_read(&dbuf[chbeg], 1, off[i], in_stream, in_size, &in_cursor);
assert(off[i] == num);
(void) num; // silence unused warning
// transfer the chunk to the GPU
if (cudaSuccess != cudaMemcpy(dbufl + chbeg, dbuf + chbeg, sizeof(char) * off[i], cudaMemcpyHostToDevice))
fprintf(stderr, "copying of dbuf to device failed\n");
CudaTest("dbuf copy to device failed");
}
// copy CPU cut buffer contents to GPU
if (cudaSuccess != cudaMemcpy(cutl, cut, sizeof(int) * blocks * warpsperblock, cudaMemcpyHostToDevice))
fprintf(stderr, "copying of cut to device failed\n");
CudaTest("cut copy to device failed");
cudaEvent_t begin, end;
if (kernel_time_us) {
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaEventRecord(begin, NULL);
}
DecompressionKernel<<<blocks, WARPSIZE*warpsperblock>>>();
CudaTest("decompression kernel launch failed");
if (kernel_time_us) {
cudaEventRecord(end, NULL);
float duration_ms;
cudaEventSynchronize(end);
cudaEventElapsedTime(&duration_ms, begin, end);
*kernel_time_us = (uint64_t) (duration_ms * 1000);
cudaEventDestroy(end);
cudaEventDestroy(begin);
}
// transfer result back to CPU
if (cudaSuccess != cudaMemcpy(fbuf, fbufl, sizeof(ull) * doubles, cudaMemcpyDeviceToHost))
fprintf(stderr, "copying of fbuf from device failed\n");
CudaTest("fbuf copy from device failed");
// output decompressed data
num = stream_write(fbuf, 8, doubles-padding, out_stream, &out_cursor);
assert(num == doubles-padding);
(void) num; // silence unused warning
free(dbuf);
free(fbuf);
free(cut);
if(cudaSuccess != cudaFree(dbufl))
fprintf(stderr, "could not deallocate dbufd\n");
CudaTest("couldn't deallocate dbufd");
if(cudaSuccess != cudaFree(cutl))
fprintf(stderr, "could not deallocate cutd\n");
CudaTest("couldn't deallocate cutd");
return in_cursor;
}
/************************************************************************************/
static int VerifySystemParameters()
{
assert(1 == sizeof(char));
assert(4 == sizeof(int));
assert(8 == sizeof(ull));
#ifndef NDEBUG
int val = 1;
assert(1 == *((char *)&val));
#endif
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
int arch_cores_sm[3] = { 1, 8, 32 };
cudaDeviceProp deviceProp;
cudaGetDeviceCount(&device_count);
if (device_count == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
// Find the best major SM Architecture GPU device
for (current_device = 0; current_device < device_count; current_device++) {
cudaGetDeviceProperties(&deviceProp, current_device);
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = max(best_SM_arch, deviceProp.major);
}
}
// Find the best CUDA capable GPU device
for (current_device = 0; current_device < device_count; current_device++) {
cudaGetDeviceProperties(&deviceProp, current_device);
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
}
else if (deviceProp.major <= 2) {
sm_per_multiproc = arch_cores_sm[deviceProp.major];
}
else { // Device has SM major > 2
sm_per_multiproc = arch_cores_sm[2];
}
int compute_perf = deviceProp.multiProcessorCount *
sm_per_multiproc * deviceProp.clockRate;
if (compute_perf > max_compute_perf) {
// If we find GPU of SM major > 2, search only these
if (best_SM_arch > 2) {
// If device==best_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
}
cudaGetDeviceProperties(&deviceProp, max_perf_device);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
  if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
return max_perf_device;
}
/************************************************************************************/
void GFC_Init()
{
int device;
device = VerifySystemParameters();
cudaSetDevice(device);
cudaFuncSetCacheConfig(CompressionKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(DecompressionKernel, cudaFuncCachePreferL1);
}
const char *GFC_Version_String = "GPU FP Compressor v2.2";
|
aee98bb888f5b212752e49312bdad7aff812b86b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_common.cuh"
__global__ void atomics(int *shared_var, int iters)
{
for (int i = 0; i < iters; i++)
{
atomicAdd(shared_var, 1);
}
}
__global__ void unsafe(int *shared_var, int iters)
{
for (int i = 0; i < iters; i++)
{
int old = *shared_var;
*shared_var = old + 1;
}
}
//int main(int argc, char **argv)
//{
// int N = 64;
// int block = 32;
// int runs = 30;
// int iters = 100000;
// int r;
// int *d_shared_var;
// int h_shared_var_atomic, h_shared_var_unsafe;
// int *h_values_read;
//
// gpuErrchk(hipMalloc((void **)&d_shared_var, sizeof(int)));
//
// double atomic_mean_time = 0;
// double unsafe_mean_time = 0;
// clock_t ops_start, ops_end;
//
// for (r = 0; r < runs; r++)
// {
// gpuErrchk(hipMemset(d_shared_var, 0x00, sizeof(int)));
//
// ops_start = clock();
// atomics <<< N / block, block >>>(d_shared_var,iters);
// gpuErrchk(hipDeviceSynchronize());
// ops_end = clock();
// atomic_mean_time += ops_end - ops_start;
//
// gpuErrchk(hipMemcpy(&h_shared_var_atomic, d_shared_var, sizeof(int),
// hipMemcpyDeviceToHost));
// gpuErrchk(hipMemset(d_shared_var, 0x00, sizeof(int)));
//
// ops_start = clock();
// unsafe <<< N / block, block >>>(d_shared_var,iters);
// gpuErrchk(hipDeviceSynchronize());
// ops_end = clock();
// unsafe_mean_time += ops_end - ops_start;
//
// gpuErrchk(hipMemcpy(&h_shared_var_unsafe, d_shared_var, sizeof(int),
// hipMemcpyDeviceToHost));
// }
//
// atomic_mean_time = atomic_mean_time / CLOCKS_PER_SEC;
// unsafe_mean_time = unsafe_mean_time / CLOCKS_PER_SEC;
//
// printf("In total, %d runs using atomic operations took %f s\n",
// runs, atomic_mean_time);
// printf(" Using atomic operations also produced an output of %d\n",
// h_shared_var_atomic);
// printf("In total, %d runs using unsafe operations took %f s\n",
// runs, unsafe_mean_time);
// printf(" Using unsafe operations also produced an output of %d\n",
// h_shared_var_unsafe);
//
// return 0;
//}
| aee98bb888f5b212752e49312bdad7aff812b86b.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_common.cuh"
__global__ void atomics(int *shared_var, int iters)
{
for (int i = 0; i < iters; i++)
{
atomicAdd(shared_var, 1);
}
}
__global__ void unsafe(int *shared_var, int iters)
{
for (int i = 0; i < iters; i++)
{
int old = *shared_var;
*shared_var = old + 1;
}
}
//int main(int argc, char **argv)
//{
// int N = 64;
// int block = 32;
// int runs = 30;
// int iters = 100000;
// int r;
// int *d_shared_var;
// int h_shared_var_atomic, h_shared_var_unsafe;
// int *h_values_read;
//
// gpuErrchk(cudaMalloc((void **)&d_shared_var, sizeof(int)));
//
// double atomic_mean_time = 0;
// double unsafe_mean_time = 0;
// clock_t ops_start, ops_end;
//
// for (r = 0; r < runs; r++)
// {
// gpuErrchk(cudaMemset(d_shared_var, 0x00, sizeof(int)));
//
// ops_start = clock();
// atomics <<< N / block, block >>>(d_shared_var,iters);
// gpuErrchk(cudaDeviceSynchronize());
// ops_end = clock();
// atomic_mean_time += ops_end - ops_start;
//
// gpuErrchk(cudaMemcpy(&h_shared_var_atomic, d_shared_var, sizeof(int),
// cudaMemcpyDeviceToHost));
// gpuErrchk(cudaMemset(d_shared_var, 0x00, sizeof(int)));
//
// ops_start = clock();
// unsafe <<< N / block, block >>>(d_shared_var,iters);
// gpuErrchk(cudaDeviceSynchronize());
// ops_end = clock();
// unsafe_mean_time += ops_end - ops_start;
//
// gpuErrchk(cudaMemcpy(&h_shared_var_unsafe, d_shared_var, sizeof(int),
// cudaMemcpyDeviceToHost));
// }
//
// atomic_mean_time = atomic_mean_time / CLOCKS_PER_SEC;
// unsafe_mean_time = unsafe_mean_time / CLOCKS_PER_SEC;
//
// printf("In total, %d runs using atomic operations took %f s\n",
// runs, atomic_mean_time);
// printf(" Using atomic operations also produced an output of %d\n",
// h_shared_var_atomic);
// printf("In total, %d runs using unsafe operations took %f s\n",
// runs, unsafe_mean_time);
// printf(" Using unsafe operations also produced an output of %d\n",
// h_shared_var_unsafe);
//
// return 0;
//}
|
ba7c753b26dad1305d9234ff73e2e148218c4127.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "Indice1D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void slice(float* ptrDevTab, int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
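// fpi(x) = 4/(1+x^2) is the integrand whose integral over [0,1] equals pi;
// slice tabulates fpi at n evenly spaced sample points (ptrDevTab[s] = fpi(s/n))
// using a strided loop over all threads.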
__device__ double fpi(double x)
{
return 4 / (1 + x * x);
}
__global__ void slice(float* ptrDevTab, int n)
{
const int NB_THREAD = Indice2D::nbThread();
const int TID = Indice2D::tid();
{
const double DX = 1 / (double) n;
double xs;
int s = TID;
while (s < n)
{
xs = s * DX;
ptrDevTab[s] = fpi(xs);
s += NB_THREAD;
}
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| ba7c753b26dad1305d9234ff73e2e148218c4127.cu | #include "Indice2D.h"
#include "Indice1D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void slice(float* ptrDevTab, int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
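// fpi(x) = 4/(1+x^2) is the integrand whose integral over [0,1] equals pi;
// slice tabulates fpi at n evenly spaced sample points (ptrDevTab[s] = fpi(s/n))
// using a strided loop over all threads.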
__device__ double fpi(double x)
{
return 4 / (1 + x * x);
}
__global__ void slice(float* ptrDevTab, int n)
{
const int NB_THREAD = Indice2D::nbThread();
const int TID = Indice2D::tid();
{
const double DX = 1 / (double) n;
double xs;
int s = TID;
while (s < n)
{
xs = s * DX;
ptrDevTab[s] = fpi(xs);
s += NB_THREAD;
}
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
8b85cce9cdb2bb257ccd9d470e2dbd9871ca870a.hip | // !!! This is a file automatically generated by hipify!!!
#define WITH_CUDA
#include <iostream>
#include <fstream>
#include <random>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "vector/include/Vector.hpp"
#include "cudastreams/CudaEvent.h"
#include "cudastreams/CudaStream.h"
#include "util.hpp"
#include "warp_reduction.hpp"
using index_type = int;
using host_index = memory::HostVector<index_type>;
using device_index = memory::DeviceVector<index_type>;
using value_type = double;
using host_vector = memory::HostVector<value_type>;
using device_vector = memory::DeviceVector<value_type>;
bool test(host_vector const& reference, host_vector const& v) {
assert(reference.size() == v.size());
auto success = true;
for(auto i=0; i<reference.size(); ++i) {
if(reference[i] != v[i]) {
printf(" error %10d expected %5.1f got %5.1f\n",
(int)i, (float)(reference[i]), (float)(v[i]));
success = false;
}
}
return success;
}
void print(host_vector const& v) {
auto pos = 0;
while(pos<v.size()) {
auto col = 0;
while(col<32 && pos<v.size()) {
printf("%3.0f", v[pos]);
++pos;
++col;
}
printf("\n");
}
}
void print(host_index const& v) {
auto pos = 0;
while(pos<v.size()) {
auto col = 0;
while(col<32 && pos<v.size()) {
printf("%3d", v[pos]);
++pos;
++col;
}
printf("\n");
}
}
host_index generate_index(int n, int max_bucket_size) {
std::random_device rd;
std::default_random_engine e(rd());
std::uniform_int_distribution<int> rng(1, max_bucket_size);
std::cout << " == bucket size " << max_bucket_size << " ==" << std::endl;
std::cout << " == array size " << n << " ==" << std::endl;
// generate the index vector on the host
host_index index(n);
auto pos = 0;
auto m = 0;
while(pos<n) {
auto increment = rng(e);
auto final = std::min(pos+increment, n);
while(pos<final) {
index[pos++] = m;
}
++m;
}
return index;
}
host_index read_index(std::string fname) {
std::ifstream fid(fname);
if(!fid.is_open()) {
std::cerr << memory::util::red("error") << " : unable to open file "
<< memory::util::yellow(fname) << std::endl;
exit(1);
}
int n;
fid >> n;
std::cout << "loading index of length " << n << " from file " << fname << std::endl;
host_index index(n);
for(auto i=0; i<n; ++i) fid >> index[i];
return index;
}
int main(int argc, char** argv) {
int max_bucket_size = read_arg(argc, argv, 1, -1);
// input array of length n
// output array of length m
// sorted indexes in p (length n)
auto ph =
max_bucket_size < 1 ?
read_index("index.txt")
: generate_index(1<<25, max_bucket_size);
const auto n = ph.size();
auto m = ph[n-1];
// make reference solution
host_vector solution(m);
solution(0,m) = 0;
for(auto i : ph) {
solution[i] += 1;
}
if(n<=256) {
std::cout << "in \n"; print(ph);
std::cout << "out\n"; print(solution);
}
// configure cuda stream for timing
CudaStream stream_compute(false);
// push index to the device
device_index p = ph;
device_vector in(n);
std::vector<device_vector> out(3);
for(auto &o: out) {
o = device_vector(m);
}
in(memory::all) = value_type{1};
auto threads_per_block=256;
auto blocks=(n+threads_per_block-1)/threads_per_block;
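// The three timed sections below run the same indexed reduction while writing
// to one, two and three output buffers respectively, to measure the extra cost
// of producing additional outputs.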
for(auto &o: out) o(memory::all) = value_type{0};
auto b1 = stream_compute.insert_event();
hipLaunchKernelGGL(( gpu::reduce_by_index)
, dim3(blocks), dim3(threads_per_block), 0, 0,
in.data(), out[0].data(), p.data(), n);
auto e1 = stream_compute.insert_event();
e1.wait();
std::cout << " 1 " << e1.time_since(b1) << " seconds" << std::endl;
test(solution, host_vector(out[0]));
for(auto &o: out) o(memory::all) = value_type{0};
auto b2 = stream_compute.insert_event();
hipLaunchKernelGGL(( gpu::reduce_by_index<value_type>)
, dim3(blocks), dim3(threads_per_block), 0, 0,
in.data(), out[0].data(), out[1].data(), p.data(), n);
auto e2 = stream_compute.insert_event();
e2.wait();
std::cout << " 2 " << e2.time_since(b2) << " seconds" << std::endl;
test(solution, host_vector(out[0]));
test(solution, host_vector(out[1]));
for(auto &o: out) o(memory::all) = value_type{0};
auto b3 = stream_compute.insert_event();
hipLaunchKernelGGL(( gpu::reduce_by_index<value_type>)
, dim3(blocks), dim3(threads_per_block), 0, 0,
in.data(), out[0].data(), out[1].data(), out[2].data(), p.data(), n);
auto e3 = stream_compute.insert_event();
e3.wait();
std::cout << " 3 " << e3.time_since(b3) << " seconds" << std::endl;
test(solution, host_vector(out[0]));
test(solution, host_vector(out[1]));
test(solution, host_vector(out[2]));
return 0;
}
| 8b85cce9cdb2bb257ccd9d470e2dbd9871ca870a.cu | #define WITH_CUDA
#include <iostream>
#include <fstream>
#include <random>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "vector/include/Vector.hpp"
#include "cudastreams/CudaEvent.h"
#include "cudastreams/CudaStream.h"
#include "util.hpp"
#include "warp_reduction.hpp"
using index_type = int;
using host_index = memory::HostVector<index_type>;
using device_index = memory::DeviceVector<index_type>;
using value_type = double;
using host_vector = memory::HostVector<value_type>;
using device_vector = memory::DeviceVector<value_type>;
bool test(host_vector const& reference, host_vector const& v) {
assert(reference.size() == v.size());
auto success = true;
for(auto i=0; i<reference.size(); ++i) {
if(reference[i] != v[i]) {
printf(" error %10d expected %5.1f got %5.1f\n",
(int)i, (float)(reference[i]), (float)(v[i]));
success = false;
}
}
return success;
}
void print(host_vector const& v) {
auto pos = 0;
while(pos<v.size()) {
auto col = 0;
while(col<32 && pos<v.size()) {
printf("%3.0f", v[pos]);
++pos;
++col;
}
printf("\n");
}
}
void print(host_index const& v) {
auto pos = 0;
while(pos<v.size()) {
auto col = 0;
while(col<32 && pos<v.size()) {
printf("%3d", v[pos]);
++pos;
++col;
}
printf("\n");
}
}
host_index generate_index(int n, int max_bucket_size) {
std::random_device rd;
std::default_random_engine e(rd());
std::uniform_int_distribution<int> rng(1, max_bucket_size);
std::cout << " == bucket size " << max_bucket_size << " ==" << std::endl;
std::cout << " == array size " << n << " ==" << std::endl;
// generate the index vector on the host
host_index index(n);
auto pos = 0;
auto m = 0;
while(pos<n) {
auto increment = rng(e);
auto final = std::min(pos+increment, n);
while(pos<final) {
index[pos++] = m;
}
++m;
}
return index;
}
host_index read_index(std::string fname) {
std::ifstream fid(fname);
if(!fid.is_open()) {
std::cerr << memory::util::red("error") << " : unable to open file "
<< memory::util::yellow(fname) << std::endl;
exit(1);
}
int n;
fid >> n;
std::cout << "loading index of length " << n << " from file " << fname << std::endl;
host_index index(n);
for(auto i=0; i<n; ++i) fid >> index[i];
return index;
}
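// For orientation only: a minimal, unoptimized sketch of the operation being
// benchmarked here (out[p[i]] += in[i] for every i), assuming one atomicAdd per
// element. The real gpu::reduce_by_index lives in warp_reduction.hpp (not shown
// in this file) and, judging by the header name, batches updates with
// warp-level reductions rather than one atomic per element.
// Double-precision atomicAdd requires compute capability 6.0 or newer.
__global__ void reduce_by_index_naive(const value_type* in,
                                      value_type* out,
                                      const index_type* p,
                                      int n) {
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < n) {
        atomicAdd(out + p[i], in[i]);   // out[p[i]] += in[i]
    }
}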
int main(int argc, char** argv) {
int max_bucket_size = read_arg(argc, argv, 1, -1);
// input array of length n
// output array of length m
// sorted indexes in p (length n)
auto ph =
max_bucket_size < 1 ?
read_index("index.txt")
: generate_index(1<<25, max_bucket_size);
const auto n = ph.size();
auto m = ph[n-1];
// make reference solution
host_vector solution(m);
solution(0,m) = 0;
for(auto i : ph) {
solution[i] += 1;
}
if(n<=256) {
std::cout << "in \n"; print(ph);
std::cout << "out\n"; print(solution);
}
// configure cuda stream for timing
CudaStream stream_compute(false);
// push index to the device
device_index p = ph;
device_vector in(n);
std::vector<device_vector> out(3);
for(auto &o: out) {
o = device_vector(m);
}
in(memory::all) = value_type{1};
auto threads_per_block=256;
auto blocks=(n+threads_per_block-1)/threads_per_block;
for(auto &o: out) o(memory::all) = value_type{0};
auto b1 = stream_compute.insert_event();
gpu::reduce_by_index
<<<blocks, threads_per_block>>>
(in.data(), out[0].data(), p.data(), n);
auto e1 = stream_compute.insert_event();
e1.wait();
std::cout << " 1 " << e1.time_since(b1) << " seconds" << std::endl;
test(solution, host_vector(out[0]));
for(auto &o: out) o(memory::all) = value_type{0};
auto b2 = stream_compute.insert_event();
gpu::reduce_by_index<value_type>
<<<blocks, threads_per_block>>>
(in.data(), out[0].data(), out[1].data(), p.data(), n);
auto e2 = stream_compute.insert_event();
e2.wait();
std::cout << " 2 " << e2.time_since(b2) << " seconds" << std::endl;
test(solution, host_vector(out[0]));
test(solution, host_vector(out[1]));
for(auto &o: out) o(memory::all) = value_type{0};
auto b3 = stream_compute.insert_event();
gpu::reduce_by_index<value_type>
<<<blocks, threads_per_block>>>
(in.data(), out[0].data(), out[1].data(), out[2].data(), p.data(), n);
auto e3 = stream_compute.insert_event();
e3.wait();
std::cout << " 3 " << e3.time_since(b3) << " seconds" << std::endl;
test(solution, host_vector(out[0]));
test(solution, host_vector(out[1]));
test(solution, host_vector(out[2]));
return 0;
}
|
9de592d06f5a9dc9cf1449cebd661585716d24d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cmath>
#include <memory>
#include <string>
#include <vector>
#include <cfloat>
#include <chrono>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <map>
#include <random>
#include <sstream>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "cudnn.h"
#include "device_launch_parameters.h"
/** Error handling from https://developer.nvidia.com/cuDNN */
#define FatalError(s) \
do { \
std::stringstream _where, _message; \
_where << __FILE__ << ':' << __LINE__; \
_message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \
std::cerr << _message.str() << "\nAborting...\n"; \
hipDeviceReset(); \
exit(1); \
} while (0)
#define checkCUDNN(status) \
do { \
std::stringstream _error; \
if (status != CUDNN_STATUS_SUCCESS) { \
_error << "CUDNN failure: " << cudnnGetErrorString(status); \
FatalError(_error.str()); \
} \
} while (0)
#define checkCudaErrors(status) \
do { \
std::stringstream _error; \
if (status != 0) { \
_error << "Cuda failure: " << status; \
FatalError(_error.str()); \
} \
} while (0)
/** Convolutional layer */
struct ConvolutionLayer {
int kernel_size;
int in_channels, in_height, in_width;
int out_channels, out_height, out_width;
std::vector<int8_t> pconv;
int pad_height;
int pad_width;
int stride_h;
int stride_v;
int dilation_h;
int dilation_w;
ConvolutionLayer(int in_channels_, int out_channels_, int kernel_size_,
int in_w_, int in_h_, int pad, int stride, int dilation)
: pconv(in_channels_ * kernel_size_ * kernel_size_ * out_channels_) {
in_channels = in_channels_;
out_channels = out_channels_;
kernel_size = kernel_size_;
in_width = in_w_;
in_height = in_h_;
out_width = in_w_ - kernel_size_ + 1;
out_height = in_h_ - kernel_size_ + 1;
pad_height = pad_width = pad;
stride_h = stride_v = stride;
dilation_h = dilation_w = dilation;
}
};
/** Training context */
struct TrainingContext {
cudnnHandle_t cudnnHandle;
cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor;
cudnnFilterDescriptor_t conv1filterDesc;
cudnnConvolutionDescriptor_t conv1Desc;
cudnnConvolutionFwdAlgo_t conv1algo;
int m_gpuid;
int m_batchSize;
size_t m_workspaceSize;
hipEvent_t start, stop;
double sum = 0.0;
// Disable copying
TrainingContext& operator=(const TrainingContext&) = delete;
TrainingContext(const TrainingContext&) = delete;
// Constructor
TrainingContext(int gpuid, int batch_size, ConvolutionLayer& conv1)
: m_gpuid(gpuid) {
m_batchSize = batch_size;
/** Create descriptors within the constructor.
* As instructed in the user manual, descriptors for
* input and output tensors, filter, and the forward
* convolution operator are created along with
* the cuDNN handle.
*/
checkCudaErrors(hipSetDevice(gpuid));
checkCUDNN(cudnnCreate(&cudnnHandle));
checkCUDNN(cudnnCreateTensorDescriptor(&dataTensor));
checkCUDNN(cudnnCreateFilterDescriptor(&conv1filterDesc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1Desc));
checkCUDNN(cudnnCreateTensorDescriptor(&conv1Tensor));
// Initialize convolution forward pass
size_t workspaceSizeFromConv = SetFwdConvolutionTensors(
conv1, dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, conv1algo);
m_workspaceSize = std::max((int)workspaceSizeFromConv, 0);
hipEventCreate(&start);
hipEventCreate(&stop);
}
~TrainingContext() {
checkCudaErrors(hipSetDevice(m_gpuid));
checkCUDNN(cudnnDestroy(cudnnHandle));
checkCUDNN(cudnnDestroyTensorDescriptor(dataTensor));
checkCUDNN(cudnnDestroyTensorDescriptor(conv1Tensor));
checkCUDNN(cudnnDestroyFilterDescriptor(conv1filterDesc));
checkCUDNN(cudnnDestroyConvolutionDescriptor(conv1Desc));
}
/** Set tensors and ops for forward pass */
size_t SetFwdConvolutionTensors(ConvolutionLayer& conv,
cudnnTensorDescriptor_t& srcTensorDesc,
cudnnTensorDescriptor_t& dstTensorDesc,
cudnnFilterDescriptor_t& filterDesc,
cudnnConvolutionDescriptor_t& convDesc,
cudnnConvolutionFwdAlgo_t& algo) {
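// The descriptors below use NHWC layout with INT8 tensor data and INT32
// accumulation (CUDNN_DATA_INT32 on the convolution descriptor), which is the
// combination cuDNN documents for INT8 convolutions together with the
// IMPLICIT_PRECOMP_GEMM algorithm chosen further down.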
int n = m_batchSize;
int c = conv.in_channels;
int h = conv.in_height;
int w = conv.in_width;
checkCUDNN(cudnnSetTensor4dDescriptor(srcTensorDesc, CUDNN_TENSOR_NHWC,
CUDNN_DATA_INT8, n, c, h, w));
checkCUDNN(cudnnSetFilter4dDescriptor(
filterDesc, CUDNN_DATA_INT8, CUDNN_TENSOR_NHWC, conv.out_channels,
conv.in_channels, conv.kernel_size, conv.kernel_size));
checkCUDNN(cudnnSetConvolution2dDescriptor(
convDesc, conv.pad_height, conv.pad_width, conv.stride_h, conv.stride_v,
conv.dilation_h, conv.dilation_w, CUDNN_CONVOLUTION, CUDNN_DATA_INT32));
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(
convDesc, srcTensorDesc, filterDesc, &n, &c, &h, &w));
checkCUDNN(cudnnSetTensor4dDescriptor(dstTensorDesc, CUDNN_TENSOR_NHWC,
CUDNN_DATA_INT8, n, c, h, w));
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
size_t sizeInBytes = 0;
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, srcTensorDesc, filterDesc, convDesc, dstTensorDesc, algo,
&sizeInBytes));
return sizeInBytes;
}
/** Execute forward pass */
void ForwardPropagation(void* data, void* result, void* weights,
void* workspace) {
float alpha = 1.0f;
float beta = 0.0f;
checkCudaErrors(hipSetDevice(m_gpuid));
hipEventRecord(start, 0);
checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor, data,
conv1filterDesc, weights, conv1Desc,
conv1algo, workspace, m_workspaceSize,
&beta, conv1Tensor, result));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsed;
hipEventElapsedTime(&elapsed, start, stop);
elapsed /= 1000.0f;
sum += elapsed;
}
};
struct Workload {
int iterations;
int batch_size;
int width, height, channels;
int out_channels, kernel_size;
int pad, stride, dilation;
};
void test(const Workload& wkl) {
auto gpu = 0;
auto channels = wkl.channels;
auto out_channels = wkl.out_channels;
auto kernel_size = wkl.kernel_size;
auto width = wkl.width;
auto height = wkl.height;
auto pad = wkl.pad;
auto stride = wkl.stride;
auto dilation = wkl.dilation;
auto batch_size = wkl.batch_size;
auto iterations = wkl.iterations;
ConvolutionLayer conv1(channels, out_channels, kernel_size, width, height,
pad, stride, dilation);
TrainingContext context(gpu, batch_size, conv1);
// Initialize convolution weights
std::mt19937 g(42);
std::uniform_int_distribution<int8_t> dconv1(-128, 127);
for (auto& iter : conv1.pconv) {
iter = dconv1(g);
}
// Initialize input image (batch size = 1)
std::vector<int8_t> img_float(1 * width * height * channels);
for (auto& iter : img_float) {
iter = dconv1(g);
}
// Allocate input and output on GPU; copy input over to GPU
int8_t *d_data, *d_conv1;
checkCudaErrors(hipMalloc(&d_data, sizeof(int8_t) * context.m_batchSize *
channels * height * width));
checkCudaErrors(hipMalloc(&d_conv1, sizeof(int32_t) * context.m_batchSize *
conv1.out_channels *
conv1.out_height * conv1.out_width));
checkCudaErrors(hipMemcpyAsync(
d_data, &img_float[0], sizeof(int8_t) * 1 * channels * width * height,
hipMemcpyHostToDevice));
// Allocate kernel on GPU
float* d_pconv1;
checkCudaErrors(hipMalloc(&d_pconv1, sizeof(int8_t) * conv1.pconv.size()));
checkCudaErrors(hipMemcpyAsync(d_pconv1, &conv1.pconv[0],
sizeof(int8_t) * conv1.pconv.size(),
hipMemcpyHostToDevice));
// Temporary buffers and workspaces
void* d_cudnn_workspace = nullptr;
if (context.m_workspaceSize > 0) {
checkCudaErrors(hipMalloc(&d_cudnn_workspace, context.m_workspaceSize));
}
// Start forward pass
checkCudaErrors(hipDeviceSynchronize());
for (int iter = 0; iter < iterations; ++iter) {
context.ForwardPropagation(d_data, d_conv1, d_pconv1, d_cudnn_workspace);
}
checkCudaErrors(hipDeviceSynchronize());
auto sum = context.sum;
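// FLOP count: each output element needs channels * kernel_size^2 multiply-adds
// (2 FLOPs each) per output channel, over roughly width * height * batch_size
// output positions; the formula uses the input spatial size, i.e. it assumes a
// 'same'-sized output.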
auto num_flops = (long long)width * height * channels * out_channels *
batch_size * 2 * kernel_size * kernel_size;
auto GFLOPS = num_flops * 1.0 / sum * iterations / 1e9;
printf("%d %dx%d %d %d kernel %d Time: %f s, Time/Iter %f s, %.2f GFLOPS\n",
batch_size, width, height, channels, out_channels, kernel_size, sum,
sum / iterations, GFLOPS);
// Free data structures
checkCudaErrors(hipFree(d_data));
checkCudaErrors(hipFree(d_conv1));
checkCudaErrors(hipFree(d_pconv1));
if (d_cudnn_workspace != nullptr)
checkCudaErrors(hipFree(d_cudnn_workspace));
}
int main() {
// iterations, N, H, W, C_in, C_out, kernel, padding, stride, dilation
Workload wkls[]{{1000, 1, 7, 7, 512, 512, 1, 0, 1, 1},
{1000, 4, 7, 7, 512, 512, 1, 0, 1, 1},
{100, 128, 7, 7, 512, 512, 1, 0, 1, 1},
{1000, 1, 7, 7, 512, 512, 3, 1, 1, 1},
{1000, 4, 7, 7, 512, 512, 3, 1, 1, 1},
{100, 128, 7, 7, 512, 512, 3, 1, 1, 1},
{1000, 1, 14, 14, 256, 256, 1, 0, 1, 1},
{1000, 4, 14, 14, 256, 256, 1, 0, 1, 1},
{100, 128, 14, 14, 256, 256, 1, 0, 1, 1},
{1000, 1, 14, 14, 256, 256, 3, 1, 1, 1},
{1000, 4, 14, 14, 256, 256, 3, 1, 1, 1},
{100, 128, 14, 14, 256, 256, 3, 1, 1, 1},
{1000, 1, 56, 56, 64, 64, 1, 0, 1, 1},
{1000, 4, 56, 56, 64, 64, 1, 0, 1, 1},
{10, 128, 56, 56, 64, 64, 1, 0, 1, 1},
{1000, 1, 56, 56, 64, 64, 3, 1, 1, 1},
{1000, 4, 56, 56, 64, 64, 3, 1, 1, 1},
{10, 128, 56, 56, 64, 64, 3, 1, 1, 1}};
for (auto&& wkl : wkls) test(wkl);
return 0;
}
| 9de592d06f5a9dc9cf1449cebd661585716d24d1.cu | #include <algorithm>
#include <cmath>
#include <memory>
#include <string>
#include <vector>
#include <cfloat>
#include <chrono>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <map>
#include <random>
#include <sstream>
#include "cuda.h"
#include "cuda_runtime.h"
#include "cudnn.h"
#include "device_launch_parameters.h"
/** Error handling from https://developer.nvidia.com/cuDNN */
#define FatalError(s) \
do { \
std::stringstream _where, _message; \
_where << __FILE__ << ':' << __LINE__; \
_message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \
std::cerr << _message.str() << "\nAborting...\n"; \
cudaDeviceReset(); \
exit(1); \
} while (0)
#define checkCUDNN(status) \
do { \
std::stringstream _error; \
if (status != CUDNN_STATUS_SUCCESS) { \
_error << "CUDNN failure: " << cudnnGetErrorString(status); \
FatalError(_error.str()); \
} \
} while (0)
#define checkCudaErrors(status) \
do { \
std::stringstream _error; \
if (status != 0) { \
_error << "Cuda failure: " << status; \
FatalError(_error.str()); \
} \
} while (0)
/** Convolutional layer */
struct ConvolutionLayer {
int kernel_size;
int in_channels, in_height, in_width;
int out_channels, out_height, out_width;
std::vector<int8_t> pconv;
int pad_height;
int pad_width;
int stride_h;
int stride_v;
int dilation_h;
int dilation_w;
ConvolutionLayer(int in_channels_, int out_channels_, int kernel_size_,
int in_w_, int in_h_, int pad, int stride, int dilation)
: pconv(in_channels_ * kernel_size_ * kernel_size_ * out_channels_) {
in_channels = in_channels_;
out_channels = out_channels_;
kernel_size = kernel_size_;
in_width = in_w_;
in_height = in_h_;
out_width = in_w_ - kernel_size_ + 1;
out_height = in_h_ - kernel_size_ + 1;
pad_height = pad_width = pad;
stride_h = stride_v = stride;
dilation_h = dilation_w = dilation;
}
};
/** Training context */
struct TrainingContext {
cudnnHandle_t cudnnHandle;
cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor;
cudnnFilterDescriptor_t conv1filterDesc;
cudnnConvolutionDescriptor_t conv1Desc;
cudnnConvolutionFwdAlgo_t conv1algo;
int m_gpuid;
int m_batchSize;
size_t m_workspaceSize;
cudaEvent_t start, stop;
double sum = 0.0;
// Disable copying
TrainingContext& operator=(const TrainingContext&) = delete;
TrainingContext(const TrainingContext&) = delete;
// Constructor
TrainingContext(int gpuid, int batch_size, ConvolutionLayer& conv1)
: m_gpuid(gpuid) {
m_batchSize = batch_size;
/** Create descriptors within the constructor.
* As instructed in the user manual, descriptors for
* input and output tensors, filter, and the forward
* convolution operator are created along with
* the cuDNN handle.
*/
checkCudaErrors(cudaSetDevice(gpuid));
checkCUDNN(cudnnCreate(&cudnnHandle));
checkCUDNN(cudnnCreateTensorDescriptor(&dataTensor));
checkCUDNN(cudnnCreateFilterDescriptor(&conv1filterDesc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1Desc));
checkCUDNN(cudnnCreateTensorDescriptor(&conv1Tensor));
// Initialize convolution forward pass
size_t workspaceSizeFromConv = SetFwdConvolutionTensors(
conv1, dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, conv1algo);
m_workspaceSize = std::max((int)workspaceSizeFromConv, 0);
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~TrainingContext() {
checkCudaErrors(cudaSetDevice(m_gpuid));
checkCUDNN(cudnnDestroy(cudnnHandle));
checkCUDNN(cudnnDestroyTensorDescriptor(dataTensor));
checkCUDNN(cudnnDestroyTensorDescriptor(conv1Tensor));
checkCUDNN(cudnnDestroyFilterDescriptor(conv1filterDesc));
checkCUDNN(cudnnDestroyConvolutionDescriptor(conv1Desc));
}
/** Set tensors and ops for forward pass */
size_t SetFwdConvolutionTensors(ConvolutionLayer& conv,
cudnnTensorDescriptor_t& srcTensorDesc,
cudnnTensorDescriptor_t& dstTensorDesc,
cudnnFilterDescriptor_t& filterDesc,
cudnnConvolutionDescriptor_t& convDesc,
cudnnConvolutionFwdAlgo_t& algo) {
int n = m_batchSize;
int c = conv.in_channels;
int h = conv.in_height;
int w = conv.in_width;
checkCUDNN(cudnnSetTensor4dDescriptor(srcTensorDesc, CUDNN_TENSOR_NHWC,
CUDNN_DATA_INT8, n, c, h, w));
checkCUDNN(cudnnSetFilter4dDescriptor(
filterDesc, CUDNN_DATA_INT8, CUDNN_TENSOR_NHWC, conv.out_channels,
conv.in_channels, conv.kernel_size, conv.kernel_size));
checkCUDNN(cudnnSetConvolution2dDescriptor(
convDesc, conv.pad_height, conv.pad_width, conv.stride_h, conv.stride_v,
conv.dilation_h, conv.dilation_w, CUDNN_CONVOLUTION, CUDNN_DATA_INT32));
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(
convDesc, srcTensorDesc, filterDesc, &n, &c, &h, &w));
checkCUDNN(cudnnSetTensor4dDescriptor(dstTensorDesc, CUDNN_TENSOR_NHWC,
CUDNN_DATA_INT8, n, c, h, w));
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
size_t sizeInBytes = 0;
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, srcTensorDesc, filterDesc, convDesc, dstTensorDesc, algo,
&sizeInBytes));
return sizeInBytes;
}
/** Execute forward pass */
void ForwardPropagation(void* data, void* result, void* weights,
void* workspace) {
float alpha = 1.0f;
float beta = 0.0f;
checkCudaErrors(cudaSetDevice(m_gpuid));
cudaEventRecord(start, 0);
checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor, data,
conv1filterDesc, weights, conv1Desc,
conv1algo, workspace, m_workspaceSize,
&beta, conv1Tensor, result));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsed;
cudaEventElapsedTime(&elapsed, start, stop);
elapsed /= 1000.0f;
sum += elapsed;
}
};
struct Workload {
int iterations;
int batch_size;
int width, height, channels;
int out_channels, kernel_size;
int pad, stride, dilation;
};
void test(const Workload& wkl) {
auto gpu = 0;
auto channels = wkl.channels;
auto out_channels = wkl.out_channels;
auto kernel_size = wkl.kernel_size;
auto width = wkl.width;
auto height = wkl.height;
auto pad = wkl.pad;
auto stride = wkl.stride;
auto dilation = wkl.dilation;
auto batch_size = wkl.batch_size;
auto iterations = wkl.iterations;
ConvolutionLayer conv1(channels, out_channels, kernel_size, width, height,
pad, stride, dilation);
TrainingContext context(gpu, batch_size, conv1);
// Initialize convolution weights
std::mt19937 g(42);
std::uniform_int_distribution<int8_t> dconv1(-128, 127);
for (auto& iter : conv1.pconv) {
iter = dconv1(g);
}
// Initialize input image (batch size = 1)
std::vector<int8_t> img_float(1 * width * height * channels);
for (auto& iter : img_float) {
iter = dconv1(g);
}
// Allocate input and output on GPU; copy input over to GPU
int8_t *d_data, *d_conv1;
checkCudaErrors(cudaMalloc(&d_data, sizeof(int8_t) * context.m_batchSize *
channels * height * width));
checkCudaErrors(cudaMalloc(&d_conv1, sizeof(int32_t) * context.m_batchSize *
conv1.out_channels *
conv1.out_height * conv1.out_width));
checkCudaErrors(cudaMemcpyAsync(
d_data, &img_float[0], sizeof(int8_t) * 1 * channels * width * height,
cudaMemcpyHostToDevice));
// Allocate kernel on GPU
float* d_pconv1;
checkCudaErrors(cudaMalloc(&d_pconv1, sizeof(int8_t) * conv1.pconv.size()));
checkCudaErrors(cudaMemcpyAsync(d_pconv1, &conv1.pconv[0],
sizeof(int8_t) * conv1.pconv.size(),
cudaMemcpyHostToDevice));
// Temporary buffers and workspaces
void* d_cudnn_workspace = nullptr;
if (context.m_workspaceSize > 0) {
checkCudaErrors(cudaMalloc(&d_cudnn_workspace, context.m_workspaceSize));
}
// Start forward pass
checkCudaErrors(cudaDeviceSynchronize());
for (int iter = 0; iter < iterations; ++iter) {
context.ForwardPropagation(d_data, d_conv1, d_pconv1, d_cudnn_workspace);
}
checkCudaErrors(cudaDeviceSynchronize());
auto sum = context.sum;
auto num_flops = (long long)width * height * channels * out_channels *
batch_size * 2 * kernel_size * kernel_size;
auto GFLOPS = num_flops * 1.0 / sum * iterations / 1e9;
printf("%d %dx%d %d %d kernel %d Time: %f s, Time/Iter %f s, %.2f GFLOPS\n",
batch_size, width, height, channels, out_channels, kernel_size, sum,
sum / iterations, GFLOPS);
// Free data structures
checkCudaErrors(cudaFree(d_data));
checkCudaErrors(cudaFree(d_conv1));
checkCudaErrors(cudaFree(d_pconv1));
if (d_cudnn_workspace != nullptr)
checkCudaErrors(cudaFree(d_cudnn_workspace));
}
int main() {
// iterations, N, H, W, C_in, C_out, kernel, padding, stride, dilation
Workload wkls[]{{1000, 1, 7, 7, 512, 512, 1, 0, 1, 1},
{1000, 4, 7, 7, 512, 512, 1, 0, 1, 1},
{100, 128, 7, 7, 512, 512, 1, 0, 1, 1},
{1000, 1, 7, 7, 512, 512, 3, 1, 1, 1},
{1000, 4, 7, 7, 512, 512, 3, 1, 1, 1},
{100, 128, 7, 7, 512, 512, 3, 1, 1, 1},
{1000, 1, 14, 14, 256, 256, 1, 0, 1, 1},
{1000, 4, 14, 14, 256, 256, 1, 0, 1, 1},
{100, 128, 14, 14, 256, 256, 1, 0, 1, 1},
{1000, 1, 14, 14, 256, 256, 3, 1, 1, 1},
{1000, 4, 14, 14, 256, 256, 3, 1, 1, 1},
{100, 128, 14, 14, 256, 256, 3, 1, 1, 1},
{1000, 1, 56, 56, 64, 64, 1, 0, 1, 1},
{1000, 4, 56, 56, 64, 64, 1, 0, 1, 1},
{10, 128, 56, 56, 64, 64, 1, 0, 1, 1},
{1000, 1, 56, 56, 64, 64, 3, 1, 1, 1},
{1000, 4, 56, 56, 64, 64, 3, 1, 1, 1},
{10, 128, 56, 56, 64, 64, 3, 1, 1, 1}};
for (auto&& wkl : wkls) test(wkl);
return 0;
}
|
4c5bd7c8869a13f7e031054b9f6d5a6f9341673e.hip | // !!! This is a file automatically generated by hipify!!!
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "tbb/concurrent_hash_map.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/tick_count.h"
#include "tbb/task_scheduler_init.h"
// #include "tbb/tbb_allocator.hz"
#include "utility.h"
#include "csv.hpp"
typedef std::basic_string<char,std::char_traits<char>,tbb::tbb_allocator<char> > MyString;
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
#include "util.h"
#include "timer.h"
using namespace tbb;
using namespace std;
static bool verbose = false;
static bool silent = false;
// const int size_factor = 2;
// typedef concurrent_hash_map<MyString,int> StringTable;
typedef concurrent_hash_map<MyString,std::vector<string>> StringTable;
std::vector<string> v_pair;
std::vector<string> v_count;
static MyString* Data;
std::vector<std::string> split_string_2(std::string str, char del) {
int first = 0;
int last = str.find_first_of(del);
std::vector<std::string> result;
while (first < str.size()) {
std::string subStr(str, first, last - first);
result.push_back(subStr);
first = last + 1;
last = str.find_first_of(del, first);
if (last == std::string::npos) {
last = str.size();
}
}
return result;
}
int main( int argc, char* argv[] ) {
// int counter = 0;
int N = atoi(argv[2]);
unsigned int t, travdirtime;
thrust::host_vector<long> h_vec_1_0(N);
thrust::host_vector<long> h_vec_2_0(N);
thrust::host_vector<long> h_vec_1_1(N);
thrust::host_vector<long> h_vec_2_1(N);
thrust::host_vector<long> h_vec_1_2(N);
thrust::host_vector<long> h_vec_2_2(N);
try {
tbb::tick_count mainStartTime = tbb::tick_count::now();
srand(2);
utility::thread_number_range threads(tbb::task_scheduler_init::default_num_threads,0);
if ( silent ) verbose = false;
Data = new MyString[N];
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
try {
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
cout << "read ERROR" << endl;
return 1;
}
std::remove("tmp0");
ofstream outputfile("tmp0");
for (int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
std::string timestamp = rec[0];
for(size_t c = timestamp.find_first_of("\""); c != string::npos; c = timestamp.find_first_of("\"")){
timestamp.erase(c,1);
}
for(size_t c = timestamp.find_first_of("."); c != string::npos; c = timestamp.find_first_of(".")){
timestamp.erase(c,1);
}
for(size_t c = timestamp.find_first_of(" "); c != string::npos; c = timestamp.find_first_of(" ")){
timestamp.erase(c,1);
}
for(size_t c = timestamp.find_first_of(":"); c != string::npos; c = timestamp.find_first_of(":")){
timestamp.erase(c,1);
}
for(size_t c = timestamp.find_first_of("/"); c != string::npos; c = timestamp.find_first_of("/")){
timestamp.erase(c,1);
}
h_vec_1_0.push_back(std::atol(timestamp.c_str()));
h_vec_2_0.push_back(1);
h_vec_1_1.push_back(std::atol(timestamp.c_str()));
h_vec_2_1.push_back(1);
h_vec_1_2.push_back(std::atol(timestamp.c_str()));
h_vec_2_2.push_back(1);
/*
if(row<(N/2))
{
h_vec_1_1.push_back(std::atol(timestamp.c_str()));
h_vec_2_1.push_back(1);
}
else
{
h_vec_1_2.push_back(std::atol(timestamp.c_str()));
h_vec_2_2.push_back(1);
}
*/
}
/*
tbb::tick_count mainStartTime = tbb::tick_count::now();
start_timer(&t);
thrust::device_vector<long> key_in_0 = h_vec_1_0;
thrust::device_vector<long> value_in_0 = h_vec_2_0;
thrust::sort(key_in_0.begin(), key_in_0.end());
thrust::device_vector<long> dkey_out_0(N,0);
thrust::device_vector<long> dvalue_out_0(N,0);
auto new_end_0 = thrust::reduce_by_key(key_in_0.begin(),
key_in_0.end(),
value_in_0.begin(),
dkey_out_0.begin(),
dvalue_out_0.begin());
long new_size_0 = new_end_0.first - dkey_out_0.begin();
travdirtime = stop_timer(&t);
print_timer(travdirtime);
utility::report_elapsed_time((tbb::tick_count::now() - mainStartTime).seconds());
for(long i=0; i <new_size_0; i++)
{
outputfile << dkey_out_0[i] << "," << dvalue_out_0[i] << endl;
}
*/
/* streams */
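// Two streams are used below: each one sorts its own copy of the numeric
// timestamp keys and then runs reduce_by_key on it, collapsing runs of equal
// (sorted) timestamps into (timestamp, count) pairs that are written to the
// tmp2/tmp3 files further down.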
// tbb::tick_count mainStartTime = tbb::tick_count::now();
start_timer(&t);
mainStartTime = tbb::tick_count::now();
hipStream_t *stream = (hipStream_t *)malloc(sizeof(hipStream_t) * 2);
thrust::device_vector<long> key_in_1 = h_vec_1_1;
thrust::device_vector<long> value_in_1 = h_vec_2_1;
thrust::device_vector<long> key_in_2 = h_vec_1_2;
thrust::device_vector<long> value_in_2 = h_vec_2_2;
for (int i = 0; i < 2; i++)
hipStreamCreate(&stream[i]);
thrust::sort(thrust::hip::par.on(stream[0]), key_in_1.begin(), key_in_1.end());
thrust::sort(thrust::hip::par.on(stream[1]), key_in_2.begin(), key_in_2.end());
for (int i = 0; i < 2; i++)
hipStreamSynchronize(stream[i]);
for (int i = 0; i < 2; i++)
hipStreamDestroy(stream[i]);
thrust::device_vector<long> dkey_out_1(N,0);
thrust::device_vector<long> dvalue_out_1(N,0);
thrust::device_vector<long> dkey_out_2(N,0);
thrust::device_vector<long> dvalue_out_2(N,0);
for (int i = 0; i < 2; i++)
hipStreamCreate(&stream[i]);
auto new_end_1 = thrust::reduce_by_key(thrust::hip::par.on(stream[0]), key_in_1.begin(),
key_in_1.end(),
value_in_1.begin(),
dkey_out_1.begin(),
dvalue_out_1.begin());
auto new_end_2 = thrust::reduce_by_key(thrust::hip::par.on(stream[1]), key_in_2.begin(),
key_in_2.end(),
value_in_2.begin(),
dkey_out_2.begin(),
dvalue_out_2.begin());
for (int i = 0; i < 2; i++)
hipStreamSynchronize(stream[i]);
for (int i = 0; i < 2; i++)
hipStreamDestroy(stream[i]);
travdirtime = stop_timer(&t);
print_timer(travdirtime);
utility::report_elapsed_time((tbb::tick_count::now() - mainStartTime).seconds());
long new_size_1 = new_end_1.first - dkey_out_1.begin();
long new_size_2 = new_end_2.first - dkey_out_2.begin();
std::remove("tmp2");
ofstream outputfile2("tmp2");
for(long i=0; i <new_size_1; i++)
{
outputfile2 << dkey_out_1[i] << "," << dvalue_out_1[i] << endl;
}
outputfile2.close();
std::remove("tmp3");
ofstream outputfile3("tmp3");
for(long i=0; i <new_size_2; i++)
{
outputfile3 << dkey_out_2[i] << "," << dvalue_out_2[i] << endl;
}
outputfile3.close();
}
catch (...) {
cout << "EXCEPTION!" << endl;
return 1;
}
delete[] Data;
// utility::report_elapsed_time((tbb::tick_count::now() - mainStartTime).seconds());
return 0;
} catch(std::exception& e) {
std::cerr<<"error occurred. error text is :\"" <<e.what()<<"\"\n";
}
}
| 4c5bd7c8869a13f7e031054b9f6d5a6f9341673e.cu | #if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "tbb/concurrent_hash_map.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/tick_count.h"
#include "tbb/task_scheduler_init.h"
// #include "tbb/tbb_allocator.hz"
#include "utility.h"
#include "csv.hpp"
typedef std::basic_string<char,std::char_traits<char>,tbb::tbb_allocator<char> > MyString;
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
#include "util.h"
#include "timer.h"
using namespace tbb;
using namespace std;
static bool verbose = false;
static bool silent = false;
// const int size_factor = 2;
// typedef concurrent_hash_map<MyString,int> StringTable;
typedef concurrent_hash_map<MyString,std::vector<string>> StringTable;
std::vector<string> v_pair;
std::vector<string> v_count;
static MyString* Data;
std::vector<std::string> split_string_2(std::string str, char del) {
int first = 0;
int last = str.find_first_of(del);
std::vector<std::string> result;
while (first < str.size()) {
std::string subStr(str, first, last - first);
result.push_back(subStr);
first = last + 1;
last = str.find_first_of(del, first);
if (last == std::string::npos) {
last = str.size();
}
}
return result;
}
int main( int argc, char* argv[] ) {
// int counter = 0;
int N = atoi(argv[2]);
unsigned int t, travdirtime;
thrust::host_vector<long> h_vec_1_0(N);
thrust::host_vector<long> h_vec_2_0(N);
thrust::host_vector<long> h_vec_1_1(N);
thrust::host_vector<long> h_vec_2_1(N);
thrust::host_vector<long> h_vec_1_2(N);
thrust::host_vector<long> h_vec_2_2(N);
try {
tbb::tick_count mainStartTime = tbb::tick_count::now();
srand(2);
utility::thread_number_range threads(tbb::task_scheduler_init::default_num_threads,0);
if ( silent ) verbose = false;
Data = new MyString[N];
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
try {
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
cout << "read ERROR" << endl;
return 1;
}
std::remove("tmp0");
ofstream outputfile("tmp0");
for (int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
std::string timestamp = rec[0];
for(size_t c = timestamp.find_first_of("\""); c != string::npos; c = timestamp.find_first_of("\"")){
timestamp.erase(c,1);
}
for(size_t c = timestamp.find_first_of("."); c != string::npos; c = timestamp.find_first_of(".")){
timestamp.erase(c,1);
}
for(size_t c = timestamp.find_first_of(" "); c != string::npos; c = timestamp.find_first_of(" ")){
timestamp.erase(c,1);
}
for(size_t c = timestamp.find_first_of(":"); c != string::npos; c = timestamp.find_first_of(":")){
timestamp.erase(c,1);
}
for(size_t c = timestamp.find_first_of("/"); c != string::npos; c = timestamp.find_first_of("/")){
timestamp.erase(c,1);
}
h_vec_1_0.push_back(std::atol(timestamp.c_str()));
h_vec_2_0.push_back(1);
h_vec_1_1.push_back(std::atol(timestamp.c_str()));
h_vec_2_1.push_back(1);
h_vec_1_2.push_back(std::atol(timestamp.c_str()));
h_vec_2_2.push_back(1);
/*
if(row<(N/2))
{
h_vec_1_1.push_back(std::atol(timestamp.c_str()));
h_vec_2_1.push_back(1);
}
else
{
h_vec_1_2.push_back(std::atol(timestamp.c_str()));
h_vec_2_2.push_back(1);
}
*/
}
/*
tbb::tick_count mainStartTime = tbb::tick_count::now();
start_timer(&t);
thrust::device_vector<long> key_in_0 = h_vec_1_0;
thrust::device_vector<long> value_in_0 = h_vec_2_0;
thrust::sort(key_in_0.begin(), key_in_0.end());
thrust::device_vector<long> dkey_out_0(N,0);
thrust::device_vector<long> dvalue_out_0(N,0);
auto new_end_0 = thrust::reduce_by_key(key_in_0.begin(),
key_in_0.end(),
value_in_0.begin(),
dkey_out_0.begin(),
dvalue_out_0.begin());
long new_size_0 = new_end_0.first - dkey_out_0.begin();
travdirtime = stop_timer(&t);
print_timer(travdirtime);
utility::report_elapsed_time((tbb::tick_count::now() - mainStartTime).seconds());
for(long i=0; i <new_size_0; i++)
{
outputfile << dkey_out_0[i] << "," << dvalue_out_0[i] << endl;
}
*/
/* streams */
// tbb::tick_count mainStartTime = tbb::tick_count::now();
start_timer(&t);
mainStartTime = tbb::tick_count::now();
cudaStream_t *stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * 2);
thrust::device_vector<long> key_in_1 = h_vec_1_1;
thrust::device_vector<long> value_in_1 = h_vec_2_1;
thrust::device_vector<long> key_in_2 = h_vec_1_2;
thrust::device_vector<long> value_in_2 = h_vec_2_2;
for (int i = 0; i < 2; i++)
cudaStreamCreate(&stream[i]);
thrust::sort(thrust::cuda::par.on(stream[0]), key_in_1.begin(), key_in_1.end());
thrust::sort(thrust::cuda::par.on(stream[1]), key_in_2.begin(), key_in_2.end());
for (int i = 0; i < 2; i++)
cudaStreamSynchronize(stream[i]);
for (int i = 0; i < 2; i++)
cudaStreamDestroy(stream[i]);
thrust::device_vector<long> dkey_out_1(N,0);
thrust::device_vector<long> dvalue_out_1(N,0);
thrust::device_vector<long> dkey_out_2(N,0);
thrust::device_vector<long> dvalue_out_2(N,0);
for (int i = 0; i < 2; i++)
cudaStreamCreate(&stream[i]);
auto new_end_1 = thrust::reduce_by_key(thrust::cuda::par.on(stream[0]), key_in_1.begin(),
key_in_1.end(),
value_in_1.begin(),
dkey_out_1.begin(),
dvalue_out_1.begin());
auto new_end_2 = thrust::reduce_by_key(thrust::cuda::par.on(stream[1]), key_in_2.begin(),
key_in_2.end(),
value_in_2.begin(),
dkey_out_2.begin(),
dvalue_out_2.begin());
for (int i = 0; i < 2; i++)
cudaStreamSynchronize(stream[i]);
for (int i = 0; i < 2; i++)
cudaStreamDestroy(stream[i]);
travdirtime = stop_timer(&t);
print_timer(travdirtime);
utility::report_elapsed_time((tbb::tick_count::now() - mainStartTime).seconds());
long new_size_1 = new_end_1.first - dkey_out_1.begin();
long new_size_2 = new_end_2.first - dkey_out_2.begin();
std::remove("tmp2");
ofstream outputfile2("tmp2");
for(long i=0; i <new_size_1; i++)
{
outputfile2 << dkey_out_1[i] << "," << dvalue_out_1[i] << endl;
}
outputfile2.close();
std::remove("tmp3");
ofstream outputfile3("tmp3");
for(long i=0; i <new_size_2; i++)
{
outputfile3 << dkey_out_2[i] << "," << dvalue_out_2[i] << endl;
}
outputfile3.close();
}
catch (...) {
cout << "EXCEPTION!" << endl;
return 1;
}
delete[] Data;
// utility::report_elapsed_time((tbb::tick_count::now() - mainStartTime).seconds());
return 0;
} catch(std::exception& e) {
std::cerr<<"error occurred. error text is :\"" <<e.what()<<"\"\n";
}
}
|
38ebd1bb7d6816f82c88f965be270be66021e430.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "crop_layer.h"
#include "utils.h"
#include "hip/hip_runtime.h"
#include "image.h"
__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c)
{
if(x < 0 || x >= w || y < 0 || y >= h) return 0;
return image[x + w*(y + c*h)];
}
__device__ float3 rgb_to_hsv_kernel(float3 rgb)
{
float r = rgb.x;
float g = rgb.y;
float b = rgb.z;
float h, s, v;
float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b);
float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b);
float delta = max - min;
v = max;
if(max == 0){
s = 0;
h = -1;
}else{
s = delta/max;
if(r == max){
h = (g - b) / delta;
} else if (g == max) {
h = 2 + (b - r) / delta;
} else {
h = 4 + (r - g) / delta;
}
if (h < 0) h += 6;
}
return make_float3(h, s, v);
}
__device__ float3 hsv_to_rgb_kernel(float3 hsv)
{
float h = hsv.x;
float s = hsv.y;
float v = hsv.z;
float r, g, b;
float f, p, q, t;
if (s == 0) {
r = g = b = v;
} else {
int index = (int) floorf(h);
f = h - index;
p = v*(1-s);
q = v*(1-s*f);
t = v*(1-s*(1-f));
if(index == 0){
r = v; g = t; b = p;
} else if(index == 1){
r = q; g = v; b = p;
} else if(index == 2){
r = p; g = v; b = t;
} else if(index == 3){
r = p; g = q; b = v;
} else if(index == 4){
r = t; g = p; b = v;
} else {
r = v; g = p; b = q;
}
}
r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
return make_float3(r, g, b);
}
__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c)
{
int ix = (int) floorf(x);
int iy = (int) floorf(y);
float dx = x - ix;
float dy = y - iy;
float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) +
(1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) +
dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c);
return val;
}
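// layer.rand_gpu holds 8 random values per image in the batch: entries [0..3]
// drive the saturation/exposure jitter in levels_image_kernel and entries
// [4..7] drive the crop offset, horizontal flip and rotation angle in
// forward_crop_layer_kernel; rand[0], rand[1] and rand[2] additionally serve as
// the global R/G/B shift amounts.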
__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift)
{
int size = batch * w * h;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
int x = id % w;
id /= w;
int y = id % h;
id /= h;
float rshift = rand[0];
float gshift = rand[1];
float bshift = rand[2];
float r0 = rand[8*id + 0];
float r1 = rand[8*id + 1];
float r2 = rand[8*id + 2];
float r3 = rand[8*id + 3];
saturation = r0*(saturation - 1) + 1;
saturation = (r1 > .5) ? 1./saturation : saturation;
exposure = r2*(exposure - 1) + 1;
exposure = (r3 > .5) ? 1./exposure : exposure;
size_t offset = id * h * w * 3;
image += offset;
float r = image[x + w*(y + h*0)];
float g = image[x + w*(y + h*1)];
float b = image[x + w*(y + h*2)];
float3 rgb = make_float3(r,g,b);
if(train){
float3 hsv = rgb_to_hsv_kernel(rgb);
hsv.y *= saturation;
hsv.z *= exposure;
rgb = hsv_to_rgb_kernel(hsv);
} else {
shift = 0;
}
image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5)*shift;
image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5)*shift;
image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5)*shift;
}
__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
float cx = w/2.;
float cy = h/2.;
int count = id;
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
float r4 = rand[8*b + 4];
float r5 = rand[8*b + 5];
float r6 = rand[8*b + 6];
float r7 = rand[8*b + 7];
float dw = (w - crop_width)*r4;
float dh = (h - crop_height)*r5;
flip = (flip && (r6 > .5));
angle = 2*angle*r7 - angle;
if(!train){
dw = (w - crop_width)/2.;
dh = (h - crop_height)/2.;
flip = 0;
angle = 0;
}
input += w*h*c*b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
float rx = cos(angle)*(x-cx) - sin(angle)*(y-cy) + cx;
float ry = sin(angle)*(x-cx) + cos(angle)*(y-cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
extern "C" void forward_crop_layer_gpu(crop_layer layer, network_state state)
{
cuda_random(layer.rand_gpu, layer.batch*8);
float radians = layer.angle*3.14159265/180.;
float scale = 2;
float translate = -1;
if(layer.noadjust){
scale = 1;
translate = 0;
}
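// With the default scale = 2 and translate = -1, pixel values (assuming inputs
// normalized to [0,1]) are remapped to roughly [-1,1] by the kernel, plus a
// per-channel random shift of up to layer.shift/2 during training; noadjust
// keeps the original range.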
int size = layer.batch * layer.w * layer.h;
hipLaunchKernelGGL(( levels_image_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream() , state.input, layer.rand_gpu, layer.batch, layer.w, layer.h, state.train, layer.saturation, layer.exposure, translate, scale, layer.shift);
CHECK_CUDA(hipPeekAtLastError());
size = layer.batch*layer.c*layer.out_w*layer.out_h;
hipLaunchKernelGGL(( forward_crop_layer_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream() , state.input, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, state.train, layer.flip, radians, layer.output_gpu);
CHECK_CUDA(hipPeekAtLastError());
/*
cuda_pull_array(layer.output_gpu, layer.output, size);
image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch));
image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch));
image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch));
translate_image(im, -translate);
scale_image(im, 1/scale);
translate_image(im2, -translate);
scale_image(im2, 1/scale);
translate_image(im3, -translate);
scale_image(im3, 1/scale);
show_image(im, "cropped");
show_image(im2, "cropped2");
show_image(im3, "cropped3");
cvWaitKey(0);
*/
}
| 38ebd1bb7d6816f82c88f965be270be66021e430.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "crop_layer.h"
#include "utils.h"
#include "cuda.h"
#include "image.h"
__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c)
{
if(x < 0 || x >= w || y < 0 || y >= h) return 0;
return image[x + w*(y + c*h)];
}
__device__ float3 rgb_to_hsv_kernel(float3 rgb)
{
float r = rgb.x;
float g = rgb.y;
float b = rgb.z;
float h, s, v;
float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b);
float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b);
float delta = max - min;
v = max;
if(max == 0){
s = 0;
h = -1;
}else{
s = delta/max;
if(r == max){
h = (g - b) / delta;
} else if (g == max) {
h = 2 + (b - r) / delta;
} else {
h = 4 + (r - g) / delta;
}
if (h < 0) h += 6;
}
return make_float3(h, s, v);
}
__device__ float3 hsv_to_rgb_kernel(float3 hsv)
{
float h = hsv.x;
float s = hsv.y;
float v = hsv.z;
float r, g, b;
float f, p, q, t;
if (s == 0) {
r = g = b = v;
} else {
int index = (int) floorf(h);
f = h - index;
p = v*(1-s);
q = v*(1-s*f);
t = v*(1-s*(1-f));
if(index == 0){
r = v; g = t; b = p;
} else if(index == 1){
r = q; g = v; b = p;
} else if(index == 2){
r = p; g = v; b = t;
} else if(index == 3){
r = p; g = q; b = v;
} else if(index == 4){
r = t; g = p; b = v;
} else {
r = v; g = p; b = q;
}
}
r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
return make_float3(r, g, b);
}
__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c)
{
int ix = (int) floorf(x);
int iy = (int) floorf(y);
float dx = x - ix;
float dy = y - iy;
float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) +
(1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) +
dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c);
return val;
}
__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift)
{
int size = batch * w * h;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
int x = id % w;
id /= w;
int y = id % h;
id /= h;
float rshift = rand[0];
float gshift = rand[1];
float bshift = rand[2];
float r0 = rand[8*id + 0];
float r1 = rand[8*id + 1];
float r2 = rand[8*id + 2];
float r3 = rand[8*id + 3];
saturation = r0*(saturation - 1) + 1;
saturation = (r1 > .5) ? 1./saturation : saturation;
exposure = r2*(exposure - 1) + 1;
exposure = (r3 > .5) ? 1./exposure : exposure;
size_t offset = id * h * w * 3;
image += offset;
float r = image[x + w*(y + h*0)];
float g = image[x + w*(y + h*1)];
float b = image[x + w*(y + h*2)];
float3 rgb = make_float3(r,g,b);
if(train){
float3 hsv = rgb_to_hsv_kernel(rgb);
hsv.y *= saturation;
hsv.z *= exposure;
rgb = hsv_to_rgb_kernel(hsv);
} else {
shift = 0;
}
image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5)*shift;
image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5)*shift;
image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5)*shift;
}
__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
float cx = w/2.;
float cy = h/2.;
int count = id;
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
float r4 = rand[8*b + 4];
float r5 = rand[8*b + 5];
float r6 = rand[8*b + 6];
float r7 = rand[8*b + 7];
float dw = (w - crop_width)*r4;
float dh = (h - crop_height)*r5;
flip = (flip && (r6 > .5));
angle = 2*angle*r7 - angle;
if(!train){
dw = (w - crop_width)/2.;
dh = (h - crop_height)/2.;
flip = 0;
angle = 0;
}
input += w*h*c*b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
float rx = cos(angle)*(x-cx) - sin(angle)*(y-cy) + cx;
float ry = sin(angle)*(x-cx) + cos(angle)*(y-cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
extern "C" void forward_crop_layer_gpu(crop_layer layer, network_state state)
{
cuda_random(layer.rand_gpu, layer.batch*8);
float radians = layer.angle*3.14159265/180.;
float scale = 2;
float translate = -1;
if(layer.noadjust){
scale = 1;
translate = 0;
}
int size = layer.batch * layer.w * layer.h;
levels_image_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >>>(state.input, layer.rand_gpu, layer.batch, layer.w, layer.h, state.train, layer.saturation, layer.exposure, translate, scale, layer.shift);
CHECK_CUDA(cudaPeekAtLastError());
size = layer.batch*layer.c*layer.out_w*layer.out_h;
forward_crop_layer_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >>>(state.input, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, state.train, layer.flip, radians, layer.output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
/*
cuda_pull_array(layer.output_gpu, layer.output, size);
image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch));
image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch));
image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch));
translate_image(im, -translate);
scale_image(im, 1/scale);
translate_image(im2, -translate);
scale_image(im2, 1/scale);
translate_image(im3, -translate);
scale_image(im3, 1/scale);
show_image(im, "cropped");
show_image(im2, "cropped2");
show_image(im3, "cropped3");
cvWaitKey(0);
*/
}
|
cc55ab8db3574459b97d1be3e7697c05ed5f970a.hip | // !!! This is a file automatically generated by hipify!!!
//#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
struct pixel
{
unsigned char r;
unsigned char g;
unsigned char b;
};
__device__
int mini(int a,int b){
if(a>b)
return b;
else
return a;
}
__device__
int maxi(int a,int b){
if(a>b)
return a;
else
return b;
}
__device__
int substitution(long *a){
//key=135
*a=(*a+135)%256;
return *a;
}
__device__
int rev_substitution(long *a){
*a=(*a-135+256)%256;
return *a;
}
__device__
int block_cipher(long* a,long index){
//key= 0->45,1->127,2->5,3->255,5->90
switch(index%5){
case 0: *a=(*a-45)%256;
break;
case 1: *a=(*a-127)%256;
break;
case 2: *a=(*a-5)%256;
break;
case 3: *a=(*a-255)%256;
break;
case 4: *a=(*a-90)%256;
break;
default:break;
}
return *a;
}
__device__
int rev_block_cipher(long* a,long index){
//key= 0->45,1->127,2->5,3->255,5->90
switch(index%5){
case 0: *a=(*a+45+256)%256;
break;
case 1: *a=(*a+256+127)%256;
break;
case 2: *a=(*a+256+5)%256;
break;
case 3: *a=(*a+255+255)%256;
break;
case 4: *a=(*a+255+90)%256;
break;
default:break;
}
return *a;
}
__device__
int power(int a,int b){
long sum=1;
for (int i=1;i<=b;i++){
sum=sum*a;
}
return sum;
}
__device__
void hamminig_code(long *a)
{
long z=*a;
int d1,d2,d3,d4,d6,d7,d8,p1,p2,p3,tmp1,tmp2;
tmp2=power(10,7);
tmp1=z/tmp2;
tmp2=tmp1%10;
d1=tmp2;
tmp2=power(10,6);
tmp1=z/tmp2;
tmp2=tmp1%10;
d2=tmp2;
tmp2=power(10,5);
tmp1=z/tmp2;
tmp2=tmp1%10;
d3=tmp2;
tmp2=power(10,4);
tmp1=z/tmp2;
tmp2=tmp1%10;
d4=tmp2;
tmp1=z/100;
tmp2=tmp1%10;
d6=tmp2;
tmp1=z/10;
tmp2=tmp1%10;
d7=tmp2;
tmp2=(*a)%10;
d8=tmp2;
p1=d1^d2^d3;
p2=d1^d2^d4;
p3=d1^d3^d4;
*a=(*a)-100*d6-10*d7-d8+100*p1+10*p2+p3;
return ;
}
__device__
void bit_rotation(long *a)
{
long z=*a,tmp,tmp1,tmp2,p1,p2,p3;
tmp1=z/100;
tmp2=tmp1%10;
p1=tmp2;
tmp1=z/10;
tmp2=tmp1%10;
p2=tmp2;
tmp2=*a%10;
p3=tmp2;
*a=(*a)-100*p1-10*p2-p3;
tmp=p1;
p1=p2;
p2=p3;
p3=tmp;
*a=(*a)+100*p1+10*p2+p3;
return;
}
__device__
void rev_bit_rotation(long *a)
{
long z=*a,tmp1,tmp2,p1,p2,p3,tmp;
tmp1=z/power(10,2);
tmp2=tmp1%10;
p1=tmp2;
tmp1=z/power(10,1);
tmp2=tmp1%10;
p2=tmp2;
tmp2=*a%10;
p3=tmp2;
*a=(*a)-100*p1-10*p2-p3;
tmp=p3;
p3=p2;
p2=p1;
p1=tmp;
//printf("%d\n",*a);
*a=(*a)+100*p1+10*p2+p3;
//printf("%d",*a);
return;
}
__device__
void Torus_Auromorphism(int *a,int *b,int c)
{
//k=1
int x,y;
x = (*a+*b)%c;
y= (*a + 2*(*b))%c;
*a=x;*b=y;
return;
}
__device__
void Anti_Torus(int *a,int *b,int c)
{
int x,y;
x=(2*(*a)+(-1)*(*b)+100000*c)%c;
y=((-1)*(*a)+*b+10000*c)%c;
//printf("a=%d b=%d x=%d y=%d\n",*a,*b,x,y);
*a=x;*b=y;
//if(*a=2&&*b2)
//*a=x;*b=y;
}
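// DecToBinary packs the binary digits of num into the decimal digits of an int
// (offset by 10^8 so leading zero bits survive); BinToDec inverts the packing.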
__device__
int DecToBinary(int num)
{
int count=0, remainder, base = 1, binary = 0, no_of_1s = 0;
while (num > 0)
{
count++;
remainder = num % 2;
if (remainder == 1)
{
no_of_1s++;
}
binary = binary + remainder * base;
num = num / 2;
base = base * 10;
}
binary+=100000000;
// printf("binary = %d\n",binary);
return binary;
}
__device__
int BinToDec(int num){
num-=100000000;
int dec=0,k=1,i=0;
//printf("%d",num);
while(1){
dec=dec + k*(num%10);
if(i==0)
i=1;
else
i=2*i;
if(num==0)
{
break;
}
k=2*i;
num/=10;
}
//printf("\n%d",dec);
return dec;
}
__global__
void PictureKernel(struct pixel* IPimage, struct pixel* OPimage,long* R,long* G,long* B,int w,int h)
{
// Calculate the row # of the d_Pin and d_Pout element
int Col = blockIdx.x*blockDim.x + threadIdx.x;
// Calculate the column # of the d_Pin and d_Pout element
int Row = blockIdx.y*blockDim.y + threadIdx.y;
// each thread computes one element of d_Pout if in range
// long a,b,c;
//long i=Row*w+Col;
/*R[i]=IPimage[i].r;
G[i]=IPimage[i].g;
B[i]=IPimage[i].b;
rev_block_cipher(&R[i],i);rev_block_cipher(&G[i],i);rev_block_cipher(&B[i],i);
IPimage[i].r=R[i];
IPimage[i].g=G[i];
IPimage[i].b=B[i];
*/
long k=Row*w+Col;
long ar,br,cr,ag,bg,cg,ab,bb,cb;
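// For every 10th column, the pixel is re-derived from its left (a*), top (b*)
// and top-left (c*) neighbours using the MED/LOCO-I style gradient predictor
// below (min/max clamping, otherwise a + b - c); all other pixels are copied
// through unchanged. Note that Row == 0 or Col == 0 would index out of bounds
// here.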
if(Col%10==0){
cr=IPimage[(Row-1)*w+Col-1].r;
ar=IPimage[Row*w+Col-1].r;
br=IPimage[(Row-1)*w+Col].r;
//OPimage[k].r=(ar+br)/2;
if(cr>=maxi(ar,br))
OPimage[k].r=mini(ar,br);
else if(cr<=mini(ar,br))
OPimage[k].r=maxi(ar,br);
else
OPimage[k].r=ar+br-cr;
cg=IPimage[(Row-1)*w+Col-1].g;
ag=IPimage[Row*w+Col-1].g;
bg=IPimage[(Row-1)*w+Col].g;
//OPimage[k].g=(ag+bg)/2;
if(cg>=maxi(ag,bg))
OPimage[k].g=mini(ag,bg);
else if(cg<=mini(ag,bg))
OPimage[k].g=maxi(ag,bg);
else
OPimage[k].g=ag+bg-cg;
cb=IPimage[(Row-1)*w+Col-1].b;
ab=IPimage[Row*w+Col-1].b;
bb=IPimage[(Row-1)*w+Col].b;
if(cb>=maxi(ab,bb))
OPimage[k].b=mini(ab,bb);
else if(cb<=mini(ab,bb))
OPimage[k].b=maxi(ab,bb);
else
OPimage[k].b=ab+bb-cb;
}
else
OPimage[Row*w+Col]=IPimage[Col+Row*w];
return;
}
int main(void)
{
int i, w, h;
char blah[3];
FILE *f, *f2;
//clock_t begin = clock();
//clock_t begin1=clock();
f=fopen("sample.ppm", "rb");
f2=fopen("pllauthenticate.ppm", "wb");
fscanf(f, "%s\n", blah);
fscanf(f, "%d %d\n", &w, &h);
fscanf(f, "%d\n", &i);
struct pixel image[h][w];
fread(&image, sizeof(image), 1, f);
//double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
//printf("%f",time_spent);
//long R[h][w],G[h][w],B[h][w];
struct pixel *d_A,*d_F;
long *d_R,*d_G,*d_B;
long n=w*h;
//const long size=n;
const long bytes = 3*sizeof(unsigned char)*n;
//Assigning memory in device
hipMalloc((void **)&d_A,sizeof(pixel)*n);
hipMalloc((void **)&d_F,sizeof(pixel)*n);
hipMalloc((void **)&d_R,sizeof(long)*n);
hipMalloc((void **)&d_G,sizeof(long)*n);
hipMalloc((void **)&d_B,sizeof(long)*n);
hipMemcpy(d_A,image,bytes,hipMemcpyHostToDevice);
dim3 threadsPerBlock(32, 32);
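// Note: the integer division below assumes w and h are multiples of 32;
// otherwise the trailing edge columns/rows are never launched.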
dim3 numBlocks(w/threadsPerBlock.x,h/threadsPerBlock.y);
clock_t begin = clock();
hipLaunchKernelGGL(( PictureKernel), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_A,d_F,d_R,d_G,d_B,w,h);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("%f micro seconds\n",time_spent*1000000);
hipMemcpy(image,d_F,bytes,hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_F);
hipFree(d_R);
hipFree(d_G);
hipFree(d_B);
fprintf(f2, "%s\n", blah);
fprintf(f2, "%d %d\n", w, h);
fprintf(f2, "%d\n", 255);
fwrite(&image, sizeof(image), 1, f2);
fclose(f);
fclose(f2);
return 0;
}
| cc55ab8db3574459b97d1be3e7697c05ed5f970a.cu | //#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
struct pixel
{
unsigned char r;
unsigned char g;
unsigned char b;
};
__device__
int mini(int a,int b){
if(a>b)
return b;
else
return a;
}
__device__
int maxi(int a,int b){
if(a>b)
return a;
else
return b;
}
__device__
int substitution(long *a){
//key=135
*a=(*a+135)%256;
return *a;
}
__device__
int rev_substitution(long *a){
*a=(*a-135+256)%256;
return *a;
}
__device__
int block_cipher(long* a,long index){
//key= 0->45, 1->127, 2->5, 3->255, 4->90
switch(index%5){
case 0: *a=(*a-45)%256;
break;
case 1: *a=(*a-127)%256;
break;
case 2: *a=(*a-5)%256;
break;
case 3: *a=(*a-255)%256;
break;
case 4: *a=(*a-90)%256;
break;
default:break;
}
return *a;
}
__device__
int rev_block_cipher(long* a,long index){
//key= 0->45, 1->127, 2->5, 3->255, 4->90
switch(index%5){
case 0: *a=(*a+45+256)%256;
break;
case 1: *a=(*a+256+127)%256;
break;
case 2: *a=(*a+256+5)%256;
break;
case 3: *a=(*a+256+255)%256;
break;
case 4: *a=(*a+256+90)%256;
break;
default:break;
}
return *a;
}
__device__
int power(int a,int b){
long sum=1;
for (int i=1;i<=b;i++){
sum=sum*a;
}
return sum;
}
__device__
void hamminig_code(long *a)
{
long z=*a;
int d1,d2,d3,d4,d6,d7,d8,p1,p2,p3,tmp1,tmp2;
tmp2=power(10,7);
tmp1=z/tmp2;
tmp2=tmp1%10;
d1=tmp2;
tmp2=power(10,6);
tmp1=z/tmp2;
tmp2=tmp1%10;
d2=tmp2;
tmp2=power(10,5);
tmp1=z/tmp2;
tmp2=tmp1%10;
d3=tmp2;
tmp2=power(10,4);
tmp1=z/tmp2;
tmp2=tmp1%10;
d4=tmp2;
tmp1=z/100;
tmp2=tmp1%10;
d6=tmp2;
tmp1=z/10;
tmp2=tmp1%10;
d7=tmp2;
tmp2=(*a)%10;
d8=tmp2;
p1=d1^d2^d3;
p2=d1^d2^d4;
p3=d1^d3^d4;
*a=(*a)-100*d6-10*d7-d8+100*p1+10*p2+p3;
return ;
}
__device__
void bit_rotation(long *a)
{
long z=*a,tmp,tmp1,tmp2,p1,p2,p3;
tmp1=z/100;
tmp2=tmp1%10;
p1=tmp2;
tmp1=z/10;
tmp2=tmp1%10;
p2=tmp2;
tmp2=*a%10;
p3=tmp2;
*a=(*a)-100*p1-10*p2-p3;
tmp=p1;
p1=p2;
p2=p3;
p3=tmp;
*a=(*a)+100*p1+10*p2+p3;
return;
}
__device__
void rev_bit_rotation(long *a)
{
long z=*a,tmp1,tmp2,p1,p2,p3,tmp;
tmp1=z/power(10,2);
tmp2=tmp1%10;
p1=tmp2;
tmp1=z/power(10,1);
tmp2=tmp1%10;
p2=tmp2;
tmp2=*a%10;
p3=tmp2;
*a=(*a)-100*p1-10*p2-p3;
tmp=p3;
p3=p2;
p2=p1;
p1=tmp;
//printf("%d\n",*a);
*a=(*a)+100*p1+10*p2+p3;
//printf("%d",*a);
return;
}
__device__
void Torus_Auromorphism(int *a,int *b,int c)
{
//k=1
int x,y;
x = (*a+*b)%c;
y= (*a + 2*(*b))%c;
*a=x;*b=y;
return;
}
__device__
void Anti_Torus(int *a,int *b,int c)
{
int x,y;
x=(2*(*a)+(-1)*(*b)+100000*c)%c;
y=((-1)*(*a)+*b+10000*c)%c;
//printf("a=%d b=%d x=%d y=%d\n",*a,*b,x,y);
*a=x;*b=y;
//if(*a=2&&*b2)
//*a=x;*b=y;
}
__device__
int DecToBinary(int num)
{
int count=0, remainder, base = 1, binary = 0, no_of_1s = 0;
while (num > 0)
{
count++;
remainder = num % 2;
if (remainder == 1)
{
no_of_1s++;
}
binary = binary + remainder * base;
num = num / 2;
base = base * 10;
}
binary+=100000000;
// printf("binary = %d\n",binary);
return binary;
}
__device__
int BinToDec(int num){
num-=100000000;
int dec=0,k=1,i=0;
//printf("%d",num);
while(1){
dec=dec + k*(num%10);
if(i==0)
i=1;
else
i=2*i;
if(num==0)
{
break;
}
k=2*i;
num/=10;
}
//printf("\n%d",dec);
return dec;
}
__global__
void PictureKernel(struct pixel* IPimage, struct pixel* OPimage,long* R,long* G,long* B,int w,int h)
{
// Calculate the column # of the d_Pin and d_Pout element
int Col = blockIdx.x*blockDim.x + threadIdx.x;
// Calculate the row # of the d_Pin and d_Pout element
int Row = blockIdx.y*blockDim.y + threadIdx.y;
// each thread computes one element of d_Pout if in range
// long a,b,c;
//long i=Row*w+Col;
/*R[i]=IPimage[i].r;
G[i]=IPimage[i].g;
B[i]=IPimage[i].b;
rev_block_cipher(&R[i],i);rev_block_cipher(&G[i],i);rev_block_cipher(&B[i],i);
IPimage[i].r=R[i];
IPimage[i].g=G[i];
IPimage[i].b=B[i];
*/
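// For every 10th column (handled below) the pixel is re-predicted from its
// left (ar), top (br) and top-left (cr) neighbours using the JPEG-LS median
// edge detector (MED) rule: clamp to the min/max of the neighbours, otherwise
// use ar+br-cr. This presumably regenerates those columns for the
// authentication output; all other pixels are copied through unchanged.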
long k=Row*w+Col;
long ar,br,cr,ag,bg,cg,ab,bb,cb;
if(Col%10==0 && Row>0 && Col>0){
cr=IPimage[(Row-1)*w+Col-1].r;
ar=IPimage[Row*w+Col-1].r;
br=IPimage[(Row-1)*w+Col].r;
//OPimage[k].r=(ar+br)/2;
if(cr>=maxi(ar,br))
OPimage[k].r=mini(ar,br);
else if(cr<=mini(ar,br))
OPimage[k].r=maxi(ar,br);
else
OPimage[k].r=ar+br-cr;
cg=IPimage[(Row-1)*w+Col-1].g;
ag=IPimage[Row*w+Col-1].g;
bg=IPimage[(Row-1)*w+Col].g;
//OPimage[k].g=(ag+bg)/2;
if(cg>=maxi(ag,bg))
OPimage[k].g=mini(ag,bg);
else if(cg<=mini(ag,bg))
OPimage[k].g=maxi(ag,bg);
else
OPimage[k].g=ag+bg-cg;
cb=IPimage[(Row-1)*w+Col-1].b;
ab=IPimage[Row*w+Col-1].b;
bb=IPimage[(Row-1)*w+Col].b;
if(cb>=maxi(ab,bb))
OPimage[k].b=mini(ab,bb);
else if(cb<=mini(ab,bb))
OPimage[k].b=maxi(ab,bb);
else
OPimage[k].b=ab+bb-cb;
}
else
OPimage[Row*w+Col]=IPimage[Col+Row*w];
return;
}
int main(void)
{
int i, w, h;
char blah[3];
FILE *f, *f2;
//clock_t begin = clock();
//clock_t begin1=clock();
f=fopen("sample.ppm", "rb");
f2=fopen("pllauthenticate.ppm", "wb");
fscanf(f, "%s\n", blah);
fscanf(f, "%d %d\n", &w, &h);
fscanf(f, "%d\n", &i);
struct pixel image[h][w];
fread(&image, sizeof(image), 1, f);
//double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
//printf("%f",time_spent);
//long R[h][w],G[h][w],B[h][w];
struct pixel *d_A,*d_F;
long *d_R,*d_G,*d_B;
long n=w*h;
//const long size=n;
const long bytes = 3*sizeof(unsigned char)*n;
//Assigning memory in device
cudaMalloc((void **)&d_A,sizeof(pixel)*n);
cudaMalloc((void **)&d_F,sizeof(pixel)*n);
cudaMalloc((void **)&d_R,sizeof(long)*n);
cudaMalloc((void **)&d_G,sizeof(long)*n);
cudaMalloc((void **)&d_B,sizeof(long)*n);
cudaMemcpy(d_A,image,bytes,cudaMemcpyHostToDevice);
dim3 threadsPerBlock(32, 32);
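// Note: the integer division below assumes w and h are multiples of 32;
// otherwise the trailing edge columns/rows are never launched.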
dim3 numBlocks(w/threadsPerBlock.x,h/threadsPerBlock.y);
clock_t begin = clock();
PictureKernel<<<numBlocks,threadsPerBlock>>>(d_A,d_F,d_R,d_G,d_B,w,h);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("%f micro seconds\n",time_spent*1000000);
cudaMemcpy(image,d_F,bytes,cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_F);
cudaFree(d_R);
cudaFree(d_G);
cudaFree(d_B);
fprintf(f2, "%s\n", blah);
fprintf(f2, "%d %d\n", w, h);
fprintf(f2, "%d\n", 255);
fwrite(&image, sizeof(image), 1, f2);
fclose(f);
fclose(f2);
return 0;
}
|
c1c2d6fdbf99555e6032ba759a2bc27c906f0e4a.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// CUDA workspaces implementation
//
// @author [email protected]
//
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <exceptions/cuda_exception.h>
#include <helpers/logger.h>
#include <math/templatemath.h>
#include <stdio.h>
#include <stdlib.h>
#include <system/op_boilerplate.h>
#include <atomic>
#include <cstring>
#include "../Workspace.h"
namespace sd {
namespace memory {
Workspace::Workspace(ExternalWorkspace *external) {
if (external->sizeHost() > 0) {
_ptrHost = (char *)external->pointerHost();
_ptrDevice = (char *)external->pointerDevice();
_initialSize = external->sizeDevice();
_currentSize = external->sizeDevice();
_initialSizeSecondary = external->sizeHost();
_currentSizeSecondary = external->sizeHost();
_offset = 0L;
_offsetSecondary = 0L;
this->_cycleAllocations = 0;
this->_cycleAllocationsSecondary = 0;
this->_spillsSize = 0;
this->_spillsSizeSecondary = 0;
_externalized = true;
}
}
Workspace::Workspace(sd::LongType primarySize, sd::LongType secondarySize) {
if (secondarySize > 0) {
auto res = hipHostMalloc(reinterpret_cast<void **>(&_ptrHost), secondarySize, hipHostMallocDefault);
if (res != 0) throw cuda_exception::build("Can't allocate [HOST] memory", res);
hipMemset(this->_ptrHost, 0, secondarySize);
this->_allocatedHost = true;
} else
this->_allocatedHost = false;
if (primarySize > 0) {
auto res = hipMalloc(reinterpret_cast<void **>(&_ptrDevice), primarySize);
if (res != 0) throw cuda_exception::build("Can't allocate [DEVICE] memory", res);
hipMemset(this->_ptrDevice, 0, primarySize);
this->_allocatedDevice = true;
} else
this->_allocatedDevice = false;
this->_initialSize = primarySize;
this->_initialSizeSecondary = secondarySize;
this->_currentSize = primarySize;
this->_currentSizeSecondary = secondarySize;
this->_offset = 0;
this->_offsetSecondary = 0;
this->_cycleAllocations = 0;
this->_spillsSize = 0;
this->_spillsSizeSecondary = 0;
}
void Workspace::init(sd::LongType primaryBytes, sd::LongType secondaryBytes) {
if (this->_currentSize < primaryBytes) {
if (this->_allocatedDevice && !_externalized) hipFree((void *)this->_ptrDevice);
auto res = hipMalloc(reinterpret_cast<void **>(&_ptrDevice), primaryBytes);
if (res != 0) throw cuda_exception::build("Can't allocate [DEVICE] memory", res);
hipMemset(this->_ptrDevice, 0, primaryBytes);
this->_currentSize = primaryBytes;
this->_allocatedDevice = true;
}
if (this->_currentSizeSecondary < secondaryBytes) {
if (this->_allocatedHost && !_externalized) hipHostFree((void *)this->_ptrHost);
auto res = hipHostMalloc(reinterpret_cast<void **>(&_ptrHost), secondaryBytes, hipHostMallocDefault);
if (res != 0) throw cuda_exception::build("Can't allocate [HOST] memory", res);
hipMemset(this->_ptrHost, 0, secondaryBytes);
this->_currentSizeSecondary = secondaryBytes;
this->_allocatedHost = true;
}
}
void Workspace::expandBy(sd::LongType numBytes, sd::LongType secondaryBytes) {
this->init(_currentSize + numBytes, _currentSizeSecondary + secondaryBytes);
}
void Workspace::expandTo(sd::LongType numBytes, sd::LongType secondaryBytes) { this->init(numBytes, secondaryBytes); }
void Workspace::freeSpills() {
_spillsSize = 0;
_spillsSizeSecondary = 0;
for (auto v : _spills) hipFree(v);
for (auto v : _spillsSecondary) hipHostFree(v);
_spills.clear();
_spillsSecondary.clear();
}
Workspace::~Workspace() {
if (this->_allocatedHost && !_externalized) hipHostFree((void *)this->_ptrHost);
if (this->_allocatedDevice && !_externalized) hipFree((void *)this->_ptrDevice);
freeSpills();
}
sd::LongType Workspace::getUsedSize() { return getCurrentOffset(); }
sd::LongType Workspace::getCurrentSize() { return _currentSize; }
sd::LongType Workspace::getCurrentOffset() { return _offset.load(); }
void *Workspace::allocateBytes(sd::LongType numBytes) { return allocateBytes(sd::memory::MemoryType::HOST, numBytes); }
sd::LongType Workspace::getAllocatedSize() { return getCurrentSize() + getSpilledSize(); }
void Workspace::scopeIn() {
freeSpills();
init(_cycleAllocations.load());
_cycleAllocations = 0;
}
void Workspace::scopeOut() { _offset = 0; }
sd::LongType Workspace::getSpilledSize() { return _spillsSize.load(); }
void *Workspace::allocateBytes(sd::memory::MemoryType type, sd::LongType numBytes) {
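  // Fast path: requests that fit are carved out of the preallocated workspace
  // buffer by bumping an atomic offset. Requests that do not fit are "spilled"
  // into a dedicated device/host allocation, tracked in _spills /
  // _spillsSecondary and released by freeSpills().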
switch (type) {
case HOST: {
if (numBytes < 1)
throw allocation_exception::build("Number of [HOST] bytes for allocation should be positive", numBytes);
// numBytes += 32;
void *result = nullptr;
this->_cycleAllocationsSecondary += numBytes;
this->_mutexAllocation.lock();
if (_offsetSecondary.load() + numBytes > _currentSizeSecondary) {
sd_debug("Allocating %lld [HOST] bytes in spills\n", numBytes);
this->_mutexAllocation.unlock();
sd::Pointer p;
auto res = hipHostMalloc(reinterpret_cast<void **>(&p), numBytes, hipHostMallocDefault);
if (res != 0) throw cuda_exception::build("Can't allocate [HOST] memory", res);
_mutexSpills.lock();
_spillsSecondary.push_back(p);
_mutexSpills.unlock();
_spillsSizeSecondary += numBytes;
return p;
}
result = (void *)(_ptrHost + _offsetSecondary.load());
_offsetSecondary += numBytes;
// memset(result, 0, (int) numBytes);
sd_debug("Allocating %lld bytes from [HOST] workspace; Current PTR: %p; Current offset: %lld\n", numBytes, result,
_offset.load());
this->_mutexAllocation.unlock();
return result;
} break;
case DEVICE: {
if (numBytes < 1)
throw allocation_exception::build("Number of [DEVICE] bytes for allocation should be positive", numBytes);
// numBytes += 32;
void *result = nullptr;
this->_cycleAllocations += numBytes;
this->_mutexAllocation.lock();
if (_offset.load() + numBytes > _currentSize) {
sd_debug("Allocating %lld [DEVICE] bytes in spills\n", numBytes);
this->_mutexAllocation.unlock();
sd::Pointer p;
auto res = hipMalloc(reinterpret_cast<void **>(&p), numBytes);
if (res != 0) throw cuda_exception::build("Can't allocate [DEVICE] memory", res);
_mutexSpills.lock();
_spills.push_back(p);
_mutexSpills.unlock();
_spillsSize += numBytes;
return p;
}
result = (void *)(_ptrDevice + _offset.load());
_offset += numBytes;
// memset(result, 0, (int) numBytes);
sd_debug("Allocating %lld bytes from [DEVICE] workspace; Current PTR: %p; Current offset: %lld\n", numBytes,
result, _offset.load());
this->_mutexAllocation.unlock();
return result;
} break;
default:
throw std::runtime_error("Unknown MemoryType was passed in");
}
}
Workspace *Workspace::clone() {
// for clone we take whatever is higher: current allocated size, or allocated size of current loop
return new Workspace(sd::math::sd_max<sd::LongType>(this->getCurrentSize(), this->_cycleAllocations.load()));
}
sd::LongType Workspace::getAllocatedSecondarySize() { return getCurrentSecondarySize() + getSpilledSecondarySize(); }
sd::LongType Workspace::getCurrentSecondarySize() { return _currentSizeSecondary; }
sd::LongType Workspace::getCurrentSecondaryOffset() { return _offsetSecondary.load(); }
sd::LongType Workspace::getSpilledSecondarySize() { return _spillsSizeSecondary; }
sd::LongType Workspace::getUsedSecondarySize() { return getCurrentSecondaryOffset(); }
} // namespace memory
} // namespace sd
| c1c2d6fdbf99555e6032ba759a2bc27c906f0e4a.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// CUDA workspaces implementation
//
// @author [email protected]
//
#include <cuda.h>
#include <cuda_runtime.h>
#include <exceptions/cuda_exception.h>
#include <helpers/logger.h>
#include <math/templatemath.h>
#include <stdio.h>
#include <stdlib.h>
#include <system/op_boilerplate.h>
#include <atomic>
#include <cstring>
#include "../Workspace.h"
namespace sd {
namespace memory {
Workspace::Workspace(ExternalWorkspace *external) {
if (external->sizeHost() > 0) {
_ptrHost = (char *)external->pointerHost();
_ptrDevice = (char *)external->pointerDevice();
_initialSize = external->sizeDevice();
_currentSize = external->sizeDevice();
_initialSizeSecondary = external->sizeHost();
_currentSizeSecondary = external->sizeHost();
_offset = 0L;
_offsetSecondary = 0L;
this->_cycleAllocations = 0;
this->_cycleAllocationsSecondary = 0;
this->_spillsSize = 0;
this->_spillsSizeSecondary = 0;
_externalized = true;
}
}
Workspace::Workspace(sd::LongType primarySize, sd::LongType secondarySize) {
if (secondarySize > 0) {
auto res = cudaHostAlloc(reinterpret_cast<void **>(&_ptrHost), secondarySize, cudaHostAllocDefault);
if (res != 0) throw cuda_exception::build("Can't allocate [HOST] memory", res);
cudaMemset(this->_ptrHost, 0, secondarySize);
this->_allocatedHost = true;
} else
this->_allocatedHost = false;
if (primarySize > 0) {
auto res = cudaMalloc(reinterpret_cast<void **>(&_ptrDevice), primarySize);
if (res != 0) throw cuda_exception::build("Can't allocate [DEVICE] memory", res);
cudaMemset(this->_ptrDevice, 0, primarySize);
this->_allocatedDevice = true;
} else
this->_allocatedDevice = false;
this->_initialSize = primarySize;
this->_initialSizeSecondary = secondarySize;
this->_currentSize = primarySize;
this->_currentSizeSecondary = secondarySize;
this->_offset = 0;
this->_offsetSecondary = 0;
this->_cycleAllocations = 0;
this->_spillsSize = 0;
this->_spillsSizeSecondary = 0;
}
void Workspace::init(sd::LongType primaryBytes, sd::LongType secondaryBytes) {
if (this->_currentSize < primaryBytes) {
if (this->_allocatedDevice && !_externalized) cudaFree((void *)this->_ptrDevice);
auto res = cudaMalloc(reinterpret_cast<void **>(&_ptrDevice), primaryBytes);
if (res != 0) throw cuda_exception::build("Can't allocate [DEVICE] memory", res);
cudaMemset(this->_ptrDevice, 0, primaryBytes);
this->_currentSize = primaryBytes;
this->_allocatedDevice = true;
}
if (this->_currentSizeSecondary < secondaryBytes) {
if (this->_allocatedHost && !_externalized) cudaFreeHost((void *)this->_ptrHost);
auto res = cudaHostAlloc(reinterpret_cast<void **>(&_ptrHost), secondaryBytes, cudaHostAllocDefault);
if (res != 0) throw cuda_exception::build("Can't allocate [HOST] memory", res);
cudaMemset(this->_ptrHost, 0, secondaryBytes);
this->_currentSizeSecondary = secondaryBytes;
this->_allocatedHost = true;
}
}
void Workspace::expandBy(sd::LongType numBytes, sd::LongType secondaryBytes) {
this->init(_currentSize + numBytes, _currentSizeSecondary + secondaryBytes);
}
void Workspace::expandTo(sd::LongType numBytes, sd::LongType secondaryBytes) { this->init(numBytes, secondaryBytes); }
void Workspace::freeSpills() {
_spillsSize = 0;
_spillsSizeSecondary = 0;
for (auto v : _spills) cudaFree(v);
for (auto v : _spillsSecondary) cudaFreeHost(v);
_spills.clear();
_spillsSecondary.clear();
}
Workspace::~Workspace() {
if (this->_allocatedHost && !_externalized) cudaFreeHost((void *)this->_ptrHost);
if (this->_allocatedDevice && !_externalized) cudaFree((void *)this->_ptrDevice);
freeSpills();
}
sd::LongType Workspace::getUsedSize() { return getCurrentOffset(); }
sd::LongType Workspace::getCurrentSize() { return _currentSize; }
sd::LongType Workspace::getCurrentOffset() { return _offset.load(); }
void *Workspace::allocateBytes(sd::LongType numBytes) { return allocateBytes(sd::memory::MemoryType::HOST, numBytes); }
sd::LongType Workspace::getAllocatedSize() { return getCurrentSize() + getSpilledSize(); }
void Workspace::scopeIn() {
freeSpills();
init(_cycleAllocations.load());
_cycleAllocations = 0;
}
void Workspace::scopeOut() { _offset = 0; }
sd::LongType Workspace::getSpilledSize() { return _spillsSize.load(); }
void *Workspace::allocateBytes(sd::memory::MemoryType type, sd::LongType numBytes) {
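  // Fast path: requests that fit are carved out of the preallocated workspace
  // buffer by bumping an atomic offset. Requests that do not fit are "spilled"
  // into a dedicated device/host allocation, tracked in _spills /
  // _spillsSecondary and released by freeSpills().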
switch (type) {
case HOST: {
if (numBytes < 1)
throw allocation_exception::build("Number of [HOST] bytes for allocation should be positive", numBytes);
// numBytes += 32;
void *result = nullptr;
this->_cycleAllocationsSecondary += numBytes;
this->_mutexAllocation.lock();
if (_offsetSecondary.load() + numBytes > _currentSizeSecondary) {
sd_debug("Allocating %lld [HOST] bytes in spills\n", numBytes);
this->_mutexAllocation.unlock();
sd::Pointer p;
auto res = cudaHostAlloc(reinterpret_cast<void **>(&p), numBytes, cudaHostAllocDefault);
if (res != 0) throw cuda_exception::build("Can't allocate [HOST] memory", res);
_mutexSpills.lock();
_spillsSecondary.push_back(p);
_mutexSpills.unlock();
_spillsSizeSecondary += numBytes;
return p;
}
result = (void *)(_ptrHost + _offsetSecondary.load());
_offsetSecondary += numBytes;
// memset(result, 0, (int) numBytes);
sd_debug("Allocating %lld bytes from [HOST] workspace; Current PTR: %p; Current offset: %lld\n", numBytes, result,
_offset.load());
this->_mutexAllocation.unlock();
return result;
} break;
case DEVICE: {
if (numBytes < 1)
throw allocation_exception::build("Number of [DEVICE] bytes for allocation should be positive", numBytes);
// numBytes += 32;
void *result = nullptr;
this->_cycleAllocations += numBytes;
this->_mutexAllocation.lock();
if (_offset.load() + numBytes > _currentSize) {
sd_debug("Allocating %lld [DEVICE] bytes in spills\n", numBytes);
this->_mutexAllocation.unlock();
sd::Pointer p;
auto res = cudaMalloc(reinterpret_cast<void **>(&p), numBytes);
if (res != 0) throw cuda_exception::build("Can't allocate [DEVICE] memory", res);
_mutexSpills.lock();
_spills.push_back(p);
_mutexSpills.unlock();
_spillsSize += numBytes;
return p;
}
result = (void *)(_ptrDevice + _offset.load());
_offset += numBytes;
// memset(result, 0, (int) numBytes);
sd_debug("Allocating %lld bytes from [DEVICE] workspace; Current PTR: %p; Current offset: %lld\n", numBytes,
result, _offset.load());
this->_mutexAllocation.unlock();
return result;
} break;
default:
throw std::runtime_error("Unknown MemoryType was passed in");
}
}
Workspace *Workspace::clone() {
// for clone we take whatever is higher: current allocated size, or allocated size of current loop
return new Workspace(sd::math::sd_max<sd::LongType>(this->getCurrentSize(), this->_cycleAllocations.load()));
}
sd::LongType Workspace::getAllocatedSecondarySize() { return getCurrentSecondarySize() + getSpilledSecondarySize(); }
sd::LongType Workspace::getCurrentSecondarySize() { return _currentSizeSecondary; }
sd::LongType Workspace::getCurrentSecondaryOffset() { return _offsetSecondary.load(); }
sd::LongType Workspace::getSpilledSecondarySize() { return _spillsSizeSecondary; }
sd::LongType Workspace::getUsedSecondarySize() { return getCurrentSecondaryOffset(); }
} // namespace memory
} // namespace sd
|
43f1a869f58b6c33bd286198fc5f05d0b7f7df3b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cmath>
#include <hip/hip_runtime.h>
__global__
void add(int n, float * x, float * y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
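// Grid-stride loop: each thread starts at its global index and advances by the
// total number of threads in the grid, so any grid size covers all n elements.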
for (int i = index; i < n; i += stride)
{
y[i] = x[i] + y[i];
}
}
int main()
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run the kernel
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
// add<<<1, blockSize>>>(N, x, y);
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
} | 43f1a869f58b6c33bd286198fc5f05d0b7f7df3b.cu | #include <iostream>
#include <cmath>
#include <cuda.h>
__global__
void add(int n, float * x, float * y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
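// Grid-stride loop: each thread starts at its global index and advances by the
// total number of threads in the grid, so any grid size covers all n elements.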
for (int i = index; i < n; i += stride)
{
y[i] = x[i] + y[i];
}
}
int main()
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run the kernel
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
// add<<<1, blockSize>>>(N, x, y);
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
} |
0675883af25e09b845c5e1fa4e5b2248553056e9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/detail/combine.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_column(type_id::STRING);
CUDF_EXPECTS(separator.is_valid(stream), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(), separator.size());
auto d_narep = get_scalar_device_view(const_cast<string_scalar&>(narep));
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create an offsets array for building the output memory layout
rmm::device_uvector<size_type> output_offsets(strings_count + 1, stream);
auto d_output_offsets = output_offsets.data();
// using inclusive-scan to compute last entry which is the total size
thrust::transform_inclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_output_offsets + 1,
[d_strings, d_separator, d_narep] __device__(size_type idx) {
size_type bytes = 0;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid()) return 0; // skip nulls
bytes += d_narep.size();
} else
bytes += d_strings.element<string_view>(idx).size_bytes();
if ((idx + 1) < d_strings.size()) bytes += d_separator.size_bytes();
return bytes;
},
thrust::plus<size_type>());
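  // The scan above wrote into d_output_offsets + 1; zeroing element 0 below
  // turns the buffer into an exclusive offsets array for the output layout.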
output_offsets.set_element_to_zero_async(0, stream);
// total size is the last entry
size_type const bytes = output_offsets.back_element(stream);
// build offsets column (only 1 string so 2 offset entries)
auto offsets_column =
make_numeric_column(data_type{type_id::INT32}, 2, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
// set the first entry to 0 and the last entry to bytes
int32_t new_offsets[] = {0, static_cast<int32_t>(bytes)};
CUDF_CUDA_TRY(hipMemcpyAsync(offsets_view.data<int32_t>(),
new_offsets,
sizeof(new_offsets),
hipMemcpyHostToDevice,
stream.value()));
// build null mask
// only one entry so it is either all valid or all null
auto const null_count =
static_cast<size_type>(strings.null_count() == strings_count && !narep.is_valid(stream));
auto null_mask = null_count
? cudf::detail::create_null_mask(1, cudf::mask_state::ALL_NULL, stream, mr)
: rmm::device_buffer{0, stream, mr};
auto chars_column = create_chars_child_column(bytes, stream, mr);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_separator, d_narep, d_output_offsets, d_chars] __device__(size_type idx) {
size_type offset = d_output_offsets[idx];
char* d_buffer = d_chars + offset;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid())
return; // do not write to buffer if element is null (including separator)
d_buffer = detail::copy_string(d_buffer, d_narep.value());
} else {
string_view d_str = d_strings.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
}
if ((idx + 1) < d_strings.size()) d_buffer = detail::copy_string(d_buffer, d_separator);
});
return make_strings_column(
1, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask));
}
} // namespace detail
// external API
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::join_strings(strings, separator, narep, cudf::default_stream_value, mr);
}
} // namespace strings
} // namespace cudf
| 0675883af25e09b845c5e1fa4e5b2248553056e9.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/detail/combine.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_column(type_id::STRING);
CUDF_EXPECTS(separator.is_valid(stream), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(), separator.size());
auto d_narep = get_scalar_device_view(const_cast<string_scalar&>(narep));
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create an offsets array for building the output memory layout
rmm::device_uvector<size_type> output_offsets(strings_count + 1, stream);
auto d_output_offsets = output_offsets.data();
// using inclusive-scan to compute last entry which is the total size
thrust::transform_inclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_output_offsets + 1,
[d_strings, d_separator, d_narep] __device__(size_type idx) {
size_type bytes = 0;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid()) return 0; // skip nulls
bytes += d_narep.size();
} else
bytes += d_strings.element<string_view>(idx).size_bytes();
if ((idx + 1) < d_strings.size()) bytes += d_separator.size_bytes();
return bytes;
},
thrust::plus<size_type>());
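  // The scan above wrote into d_output_offsets + 1; zeroing element 0 below
  // turns the buffer into an exclusive offsets array for the output layout.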
output_offsets.set_element_to_zero_async(0, stream);
// total size is the last entry
size_type const bytes = output_offsets.back_element(stream);
// build offsets column (only 1 string so 2 offset entries)
auto offsets_column =
make_numeric_column(data_type{type_id::INT32}, 2, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
// set the first entry to 0 and the last entry to bytes
int32_t new_offsets[] = {0, static_cast<int32_t>(bytes)};
CUDF_CUDA_TRY(cudaMemcpyAsync(offsets_view.data<int32_t>(),
new_offsets,
sizeof(new_offsets),
cudaMemcpyHostToDevice,
stream.value()));
// build null mask
// only one entry so it is either all valid or all null
auto const null_count =
static_cast<size_type>(strings.null_count() == strings_count && !narep.is_valid(stream));
auto null_mask = null_count
? cudf::detail::create_null_mask(1, cudf::mask_state::ALL_NULL, stream, mr)
: rmm::device_buffer{0, stream, mr};
auto chars_column = create_chars_child_column(bytes, stream, mr);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_separator, d_narep, d_output_offsets, d_chars] __device__(size_type idx) {
size_type offset = d_output_offsets[idx];
char* d_buffer = d_chars + offset;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid())
return; // do not write to buffer if element is null (including separator)
d_buffer = detail::copy_string(d_buffer, d_narep.value());
} else {
string_view d_str = d_strings.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
}
if ((idx + 1) < d_strings.size()) d_buffer = detail::copy_string(d_buffer, d_separator);
});
return make_strings_column(
1, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask));
}
} // namespace detail
// external API
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::join_strings(strings, separator, narep, cudf::default_stream_value, mr);
}
} // namespace strings
} // namespace cudf
|
cf098fdc16800c53226fbc48fa6f2fa97042094a.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2016 Nicolas Weber and Sandra C. Amend / GCC / TU-Darmstadt. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause license that can be
// found in the LICENSE file.
#include <cmath>
#include <cstdio>
#include <cstdint>
#include <chrono>
#include <hip/hip_runtime.h>
#define THREADS 128
#define WSIZE 32
#define TSIZE (THREADS / WSIZE)
#define TX threadIdx.x
#define PX (blockIdx.x * TSIZE + (TX / WSIZE))
#define PY blockIdx.y
#define WTHREAD (TX % WSIZE)
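// THREADS = 128 gives 4 warps per block (TSIZE patches): each warp of WSIZE
// lanes cooperates on one output patch, with PX/PY the patch coordinates and
// WTHREAD the lane index used to stride over that patch's input pixels.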
//-------------------------------------------------------------------
// SHARED
//-------------------------------------------------------------------
struct Params {
uint32_t oWidth;
uint32_t oHeight;
uint32_t iWidth;
uint32_t iHeight;
float pWidth;
float pHeight;
float lambda;
uint32_t repeat;
};
__device__ __forceinline__
void normalize(float4& var) {
var.x /= var.w;
var.y /= var.w;
var.z /= var.w;
var.w = 1.f;
}
__device__ __forceinline__
void add(float4& output, const uchar3& color, const float factor) {
output.x += color.x * factor;
output.y += color.y * factor;
output.z += color.z * factor;
output.w += factor;
}
__device__ __forceinline__
float lambda(const Params p, const float dist) {
if(p.lambda == 0.f)
return 1.f;
else if(p.lambda == 1.f)
return dist;
return powf(dist, p.lambda);
}
__device__ __forceinline__
void operator+=(float4& output, const float4 value) {
output.x += value.x;
output.y += value.y;
output.z += value.z;
output.w += value.w;
}
struct Local {
float sx, ex, sy, ey;
uint32_t sxr, syr, exr, eyr, xCount, yCount, pixelCount;
__device__ __forceinline__ Local(const Params& p) {
sx = fmaxf( PX * p.pWidth, 0.f);
ex = fminf((PX+1) * p.pWidth, (float)p.iWidth);
sy = fmaxf( PY * p.pHeight, 0.f);
ey = fminf((PY+1) * p.pHeight, (float)p.iHeight);
sxr = (uint32_t)floorf(sx);
syr = (uint32_t)floorf(sy);
exr = (uint32_t)ceilf(ex);
eyr = (uint32_t)ceilf(ey);
xCount = exr - sxr;
yCount = eyr - syr;
pixelCount = xCount * yCount;
}
};
__device__ __forceinline__
float contribution(const Local& l, float f, const uint32_t x, const uint32_t y) {
if(x < l.sx) f *= 1.f - (l.sx - x);
if((x+1.f) > l.ex) f *= 1.f - ((x+1.f) - l.ex);
if(y < l.sy) f *= 1.f - (l.sy - y);
if((y+1.f) > l.ey) f *= 1.f - ((y+1.f) - l.ey);
return f;
}
// https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
__device__ __forceinline__
float4 __shfl_down(const float4 var, const uint32_t srcLane, const uint32_t width = 32) {
float4 output;
output.x = __shfl_down(var.x, srcLane, width);
output.y = __shfl_down(var.y, srcLane, width);
output.z = __shfl_down(var.z, srcLane, width);
output.w = __shfl_down(var.w, srcLane, width);
return output;
}
__device__ __forceinline__
void reduce(float4& value) {
value += __shfl_down(value, 16);
value += __shfl_down(value, 8);
value += __shfl_down(value, 4);
value += __shfl_down(value, 2);
value += __shfl_down(value, 1);
}
__device__ __forceinline__
float distance(const float4& avg, const uchar3& color) {
const float x = avg.x - color.x;
const float y = avg.y - color.y;
const float z = avg.z - color.z;
return sqrtf(x * x + y * y + z * z) / 441.6729559f; // L2-Norm / sqrt(255^2 * 3)
}
__global__
void kernelGuidance(const uchar3* __restrict__ input,
uchar3* __restrict__ patches, const Params p)
{
if(PX >= p.oWidth || PY >= p.oHeight) return;
// init
const Local l(p);
float4 color = make_float4(0.f, 0.f, 0.f, 0.f);
// iterate pixels
for(uint32_t i = WTHREAD; i < l.pixelCount; i += WSIZE) {
const uint32_t x = l.sxr + (i % l.xCount);
const uint32_t y = l.syr + (i / l.xCount);
float f = contribution(l, 1.f, x, y);
const uchar3& pixel = input[x + y * p.iWidth];
color += make_float4(pixel.x * f, pixel.y * f, pixel.z * f, f);
}
// reduce warps
reduce(color);
// store results
if((TX % 32) == 0) {
normalize(color);
patches[PX + PY * p.oWidth] = make_uchar3(color.x, color.y, color.z);
}
}
__device__ __forceinline__
float4 calcAverage(const Params& p, const uchar3* __restrict__ patches) {
const float corner = 1.0;
const float edge = 2.0;
const float center = 4.0;
// calculate average color
float4 avg = make_float4(0.f, 0.f, 0.f, 0.f);
// TOP
if(PY > 0) {
if(PX > 0)
add(avg, patches[(PX - 1) + (PY - 1) * p.oWidth], corner);
add(avg, patches[(PX) + (PY - 1) * p.oWidth], edge);
if((PX+1) < p.oWidth)
add(avg, patches[(PX + 1) + (PY - 1) * p.oWidth], corner);
}
// LEFT
if(PX > 0)
add(avg, patches[(PX - 1) + (PY) * p.oWidth], edge);
// CENTER
add(avg, patches[(PX) + (PY) * p.oWidth], center);
// RIGHT
if((PX+1) < p.oWidth)
add(avg, patches[(PX + 1) + (PY) * p.oWidth], edge);
// BOTTOM
if((PY+1) < p.oHeight) {
if(PX > 0)
add(avg, patches[(PX - 1) + (PY + 1) * p.oWidth], corner);
add(avg, patches[(PX) + (PY + 1) * p.oWidth], edge);
if((PX+1) < p.oWidth)
add(avg, patches[(PX + 1) + (PY + 1) * p.oWidth], corner);
}
normalize(avg);
return avg;
}
__global__
void kernelDownsampling(const uchar3* __restrict__ input,
const uchar3* __restrict__ patches,
const Params p,
uchar3* __restrict__ output)
{
if(PX >= p.oWidth || PY >= p.oHeight) return;
// init
const Local l(p);
const float4 avg = calcAverage(p, patches);
float4 color = make_float4(0.f, 0.f, 0.f, 0.f);
// iterate pixels
for(uint32_t i = WTHREAD; i < l.pixelCount; i += WSIZE) {
const uint32_t x = l.sxr + (i % l.xCount);
const uint32_t y = l.syr + (i / l.xCount);
const uchar3& pixel = input[x + y * p.iWidth];
float f = distance(avg, pixel);
f = lambda(p, f);
f = contribution(l, f, x, y);
add(color, pixel, f);
}
// reduce warp
reduce(color);
if(WTHREAD == 0) {
uchar3& ref = output[PX + PY * p.oWidth];
if(color.w == 0.0f)
ref = make_uchar3((unsigned char)avg.x, (unsigned char)avg.y, (unsigned char)avg.z);
else {
normalize(color);
ref = make_uchar3((unsigned char)color.x, (unsigned char)color.y, (unsigned char)color.z);
}
}
}
void run(const Params& p, const void* hInput, void* hOutput) {
const size_t sInput = sizeof(uchar3) * p.iWidth * p.iHeight;
const size_t sOutput = sizeof(uchar3) * p.oWidth * p.oHeight;
const size_t sGuidance = sizeof(uchar3) * p.oWidth * p.oHeight;
uchar3* dInput = 0, *dOutput = 0, *dGuidance = 0;
hipMalloc(&dInput, sInput);
hipMalloc(&dOutput, sOutput);
hipMalloc(&dGuidance, sGuidance);
hipMemcpy(dInput, hInput, sInput, hipMemcpyHostToDevice);
const dim3 threads(THREADS, 1, 1); // 4 warps, 1 warp per patch
const dim3 blocks((uint32_t)::ceil(p.oWidth / (float)TSIZE), p.oHeight, 1);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (uint32_t i = 0; i < p.repeat; i++) {
hipLaunchKernelGGL(kernelGuidance, blocks, threads, 0, 0, dInput, dGuidance, p);
hipLaunchKernelGGL(kernelDownsampling, blocks, threads, 0, 0, dInput, dGuidance, p, dOutput);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / p.repeat);
hipMemcpy(hOutput, dOutput, sOutput, hipMemcpyDeviceToHost);
hipFree(dInput);
hipFree(dOutput);
hipFree(dGuidance);
}
| cf098fdc16800c53226fbc48fa6f2fa97042094a.cu | // Copyright (c) 2016 Nicolas Weber and Sandra C. Amend / GCC / TU-Darmstadt. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause license that can be
// found in the LICENSE file.
#include <cmath>
#include <cstdio>
#include <cstdint>
#include <chrono>
#include <hip/hip_runtime.h>
#define THREADS 128
#define WSIZE 32
#define TSIZE (THREADS / WSIZE)
#define TX threadIdx.x
#define PX (blockIdx.x * TSIZE + (TX / WSIZE))
#define PY blockIdx.y
#define WTHREAD (TX % WSIZE)
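// THREADS = 128 gives 4 warps per block (TSIZE patches): each warp of WSIZE
// lanes cooperates on one output patch, with PX/PY the patch coordinates and
// WTHREAD the lane index used to stride over that patch's input pixels.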
//-------------------------------------------------------------------
// SHARED
//-------------------------------------------------------------------
struct Params {
uint32_t oWidth;
uint32_t oHeight;
uint32_t iWidth;
uint32_t iHeight;
float pWidth;
float pHeight;
float lambda;
uint32_t repeat;
};
__device__ __forceinline__
void normalize(float4& var) {
var.x /= var.w;
var.y /= var.w;
var.z /= var.w;
var.w = 1.f;
}
__device__ __forceinline__
void add(float4& output, const uchar3& color, const float factor) {
output.x += color.x * factor;
output.y += color.y * factor;
output.z += color.z * factor;
output.w += factor;
}
__device__ __forceinline__
float lambda(const Params p, const float dist) {
if(p.lambda == 0.f)
return 1.f;
else if(p.lambda == 1.f)
return dist;
return powf(dist, p.lambda);
}
__device__ __forceinline__
void operator+=(float4& output, const float4 value) {
output.x += value.x;
output.y += value.y;
output.z += value.z;
output.w += value.w;
}
struct Local {
float sx, ex, sy, ey;
uint32_t sxr, syr, exr, eyr, xCount, yCount, pixelCount;
__device__ __forceinline__ Local(const Params& p) {
sx = fmaxf( PX * p.pWidth, 0.f);
ex = fminf((PX+1) * p.pWidth, (float)p.iWidth);
sy = fmaxf( PY * p.pHeight, 0.f);
ey = fminf((PY+1) * p.pHeight, (float)p.iHeight);
sxr = (uint32_t)floorf(sx);
syr = (uint32_t)floorf(sy);
exr = (uint32_t)ceilf(ex);
eyr = (uint32_t)ceilf(ey);
xCount = exr - sxr;
yCount = eyr - syr;
pixelCount = xCount * yCount;
}
};
__device__ __forceinline__
float contribution(const Local& l, float f, const uint32_t x, const uint32_t y) {
if(x < l.sx) f *= 1.f - (l.sx - x);
if((x+1.f) > l.ex) f *= 1.f - ((x+1.f) - l.ex);
if(y < l.sy) f *= 1.f - (l.sy - y);
if((y+1.f) > l.ey) f *= 1.f - ((y+1.f) - l.ey);
return f;
}
// https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
__device__ __forceinline__
float4 __shfl_down(const float4 var, const uint32_t srcLane, const uint32_t width = 32) {
float4 output;
output.x = __shfl_down(var.x, srcLane, width);
output.y = __shfl_down(var.y, srcLane, width);
output.z = __shfl_down(var.z, srcLane, width);
output.w = __shfl_down(var.w, srcLane, width);
return output;
}
__device__ __forceinline__
void reduce(float4& value) {
value += __shfl_down(value, 16);
value += __shfl_down(value, 8);
value += __shfl_down(value, 4);
value += __shfl_down(value, 2);
value += __shfl_down(value, 1);
}
__device__ __forceinline__
float distance(const float4& avg, const uchar3& color) {
const float x = avg.x - color.x;
const float y = avg.y - color.y;
const float z = avg.z - color.z;
return sqrtf(x * x + y * y + z * z) / 441.6729559f; // L2-Norm / sqrt(255^2 * 3)
}
__global__
void kernelGuidance(const uchar3* __restrict__ input,
uchar3* __restrict__ patches, const Params p)
{
if(PX >= p.oWidth || PY >= p.oHeight) return;
// init
const Local l(p);
float4 color = make_float4(0.f, 0.f, 0.f, 0.f);
// iterate pixels
for(uint32_t i = WTHREAD; i < l.pixelCount; i += WSIZE) {
const uint32_t x = l.sxr + (i % l.xCount);
const uint32_t y = l.syr + (i / l.xCount);
float f = contribution(l, 1.f, x, y);
const uchar3& pixel = input[x + y * p.iWidth];
color += make_float4(pixel.x * f, pixel.y * f, pixel.z * f, f);
}
// reduce warps
reduce(color);
// store results
if((TX % 32) == 0) {
normalize(color);
patches[PX + PY * p.oWidth] = make_uchar3(color.x, color.y, color.z);
}
}
__device__ __forceinline__
float4 calcAverage(const Params& p, const uchar3* __restrict__ patches) {
const float corner = 1.0;
const float edge = 2.0;
const float center = 4.0;
// calculate average color
float4 avg = make_float4(0.f, 0.f, 0.f, 0.f);
// TOP
if(PY > 0) {
if(PX > 0)
add(avg, patches[(PX - 1) + (PY - 1) * p.oWidth], corner);
add(avg, patches[(PX) + (PY - 1) * p.oWidth], edge);
if((PX+1) < p.oWidth)
add(avg, patches[(PX + 1) + (PY - 1) * p.oWidth], corner);
}
// LEFT
if(PX > 0)
add(avg, patches[(PX - 1) + (PY) * p.oWidth], edge);
// CENTER
add(avg, patches[(PX) + (PY) * p.oWidth], center);
// RIGHT
if((PX+1) < p.oWidth)
add(avg, patches[(PX + 1) + (PY) * p.oWidth], edge);
// BOTTOM
if((PY+1) < p.oHeight) {
if(PX > 0)
add(avg, patches[(PX - 1) + (PY + 1) * p.oWidth], corner);
add(avg, patches[(PX) + (PY + 1) * p.oWidth], edge);
if((PX+1) < p.oWidth)
add(avg, patches[(PX + 1) + (PY + 1) * p.oWidth], corner);
}
normalize(avg);
return avg;
}
__global__
void kernelDownsampling(const uchar3* __restrict__ input,
const uchar3* __restrict__ patches,
const Params p,
uchar3* __restrict__ output)
{
if(PX >= p.oWidth || PY >= p.oHeight) return;
// init
const Local l(p);
const float4 avg = calcAverage(p, patches);
float4 color = make_float4(0.f, 0.f, 0.f, 0.f);
// iterate pixels
for(uint32_t i = WTHREAD; i < l.pixelCount; i += WSIZE) {
const uint32_t x = l.sxr + (i % l.xCount);
const uint32_t y = l.syr + (i / l.xCount);
const uchar3& pixel = input[x + y * p.iWidth];
float f = distance(avg, pixel);
f = lambda(p, f);
f = contribution(l, f, x, y);
add(color, pixel, f);
}
// reduce warp
reduce(color);
if(WTHREAD == 0) {
uchar3& ref = output[PX + PY * p.oWidth];
if(color.w == 0.0f)
ref = make_uchar3((unsigned char)avg.x, (unsigned char)avg.y, (unsigned char)avg.z);
else {
normalize(color);
ref = make_uchar3((unsigned char)color.x, (unsigned char)color.y, (unsigned char)color.z);
}
}
}
void run(const Params& p, const void* hInput, void* hOutput) {
const size_t sInput = sizeof(uchar3) * p.iWidth * p.iHeight;
const size_t sOutput = sizeof(uchar3) * p.oWidth * p.oHeight;
const size_t sGuidance = sizeof(uchar3) * p.oWidth * p.oHeight;
uchar3* dInput = 0, *dOutput = 0, *dGuidance = 0;
hipMalloc(&dInput, sInput);
hipMalloc(&dOutput, sOutput);
hipMalloc(&dGuidance, sGuidance);
hipMemcpy(dInput, hInput, sInput, hipMemcpyHostToDevice);
const dim3 threads(THREADS, 1, 1); // 4 warps, 1 warp per patch
const dim3 blocks((uint32_t)std::ceil(p.oWidth / (float)TSIZE), p.oHeight, 1);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (uint32_t i = 0; i < p.repeat; i++) {
hipLaunchKernelGGL(kernelGuidance, blocks, threads, 0, 0, dInput, dGuidance, p);
hipLaunchKernelGGL(kernelDownsampling, blocks, threads, 0, 0, dInput, dGuidance, p, dOutput);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / p.repeat);
hipMemcpy(hOutput, dOutput, sOutput, hipMemcpyDeviceToHost);
hipFree(dInput);
hipFree(dOutput);
hipFree(dGuidance);
}
|
ff52fe9f4494146019621687cc532b7bffe5a92f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
//#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
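// 64 boxes per tile: each thread packs its IoU-above-threshold decisions
// against one 64-box column into a single unsigned long long bitmask.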
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(hipGetDevice(¤t_device));
if (current_device == device_id) {
return;
}
// The call to hipSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(hipSetDevice(device_id));
}
void _nms(long* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(hipMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(hipMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(mask_dev));
}
/* Generated by Cython 0.24 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_24"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifndef __cplusplus
#error "Cython files generated with the C++ option must be compiled with a C++ compiler."
#endif
#ifndef CYTHON_INLINE
#define CYTHON_INLINE inline
#endif
template<typename T>
void __Pyx_call_destructor(T& x) {
x.~T();
}
template<typename T>
class __Pyx_FakeReference {
public:
__Pyx_FakeReference() : ptr(NULL) { }
__Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
T *operator->() { return ptr; }
operator T&() { return *ptr; }
private:
T *ptr;
};
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__nms__gpu_nms
#define __PYX_HAVE_API__nms__gpu_nms
#include "string.h"
#include "stdio.h"
#include "stdlib.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "gpu_nms.hpp"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER) && defined (_M_X64)
#define __Pyx_sst_abs(value) _abs64(value)
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_COMPILING_IN_CPYTHON
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);  /* +1 for the terminating NUL copied by strcpy below */
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* None.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"nms\\gpu_nms.pyx",
"__init__.pxd",
"type.pxd",
};
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":725
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":726
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":727
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":728
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":732
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":733
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":734
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":735
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":739
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":740
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":749
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":750
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":751
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":753
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":754
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":755
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":757
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":758
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":760
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":761
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":762
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* None.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
/* None.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
/*--- Type declarations ---*/
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":764
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":765
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":766
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":768
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact);
/* BufferFormatCheck.proto */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type); // PROTO
/* PyObjectGetAttrStr.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* BufferIndexError.proto */
static void __Pyx_RaiseBufferIndexError(int axis);
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
/* SliceObject.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
int has_cstart, int has_cstop, int wraparound);
/* BufferFallbackError.proto */
static void __Pyx_RaiseBufferFallbackError(void);
/* PyThreadStateGet.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET();
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (!PyErr_Occurred()) {
PyObject* args = PyTuple_Pack(1, key);
if (likely(args))
PyErr_SetObject(PyExc_KeyError, args);
Py_XDECREF(args);
}
return NULL;
}
Py_INCREF(value);
return value;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* None.proto */
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* None.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* None.proto */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* None.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eqf(a, b) ((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* None.proto */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/* None.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
/* CIntFromPy.proto */
static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* PyIdentifierFromString.proto */
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
/* ModuleImport.proto */
static PyObject *__Pyx_ImportModule(const char *name);
/* TypeImport.proto */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'nms.gpu_nms' */
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t = { "float32_t", NULL, sizeof(__pyx_t_5numpy_float32_t), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t = { "int32_t", NULL, sizeof(__pyx_t_5numpy_int32_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int32_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int32_t), 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t = { "intp_t", NULL, sizeof(__pyx_t_5numpy_intp_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_intp_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_intp_t), 0 };
#define __Pyx_MODULE_NAME "nms.gpu_nms"
int __pyx_module_is_main_nms__gpu_nms = 0;
/* Implementation of 'nms.gpu_nms' */
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_RuntimeError;
static const char __pyx_k_np[] = "np";
static const char __pyx_k_dets[] = "dets";
static const char __pyx_k_keep[] = "keep";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_int32[] = "int32";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_order[] = "order";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_scores[] = "scores";
static const char __pyx_k_thresh[] = "thresh";
static const char __pyx_k_argsort[] = "argsort";
static const char __pyx_k_gpu_nms[] = "gpu_nms";
static const char __pyx_k_num_out[] = "num_out";
static const char __pyx_k_boxes_dim[] = "boxes_dim";
static const char __pyx_k_boxes_num[] = "boxes_num";
static const char __pyx_k_device_id[] = "device_id";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_nms_gpu_nms[] = "nms.gpu_nms";
static const char __pyx_k_sorted_dets[] = "sorted_dets";
static const char __pyx_k_RuntimeError[] = "RuntimeError";
static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static const char __pyx_k_D_v_zix_caffe_caffe_win_20160523[] = "D:\\v-zix\\caffe\\caffe-win-20160523\\models\\py-faster-rcnn-windows\\lib\\nms\\gpu_nms.pyx";
static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
static PyObject *__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_argsort;
static PyObject *__pyx_n_s_boxes_dim;
static PyObject *__pyx_n_s_boxes_num;
static PyObject *__pyx_n_s_dets;
static PyObject *__pyx_n_s_device_id;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_gpu_nms;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_int32;
static PyObject *__pyx_n_s_keep;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_nms_gpu_nms;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_num_out;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_order;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_scores;
static PyObject *__pyx_n_s_sorted_dets;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_thresh;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static PyObject *__pyx_int_4;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_slice_;
static PyObject *__pyx_slice__3;
static PyObject *__pyx_slice__4;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_codeobj__12;
/* "nms/gpu_nms.pyx":16
* void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
*
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<<
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
*/
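/* The code below is the generated body of the gpu_nms function shown in the
 * excerpt above: it allocates an int32 keep array, takes the scores from
 * column 4 of dets, builds a descending-score ordering, gathers the sorted
 * detections, calls the C++ _nms driver, and returns the kept indices mapped
 * back to the original order. Hypothetical usage from Python (dets is a
 * float32 [N, 5] array with the score in column 4 and the box coordinates in
 * the first four columns; 0.3 is only an example threshold):
 *
 *     from nms.gpu_nms import gpu_nms
 *     keep = gpu_nms(dets, 0.3, device_id=0)
 */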
/* Python wrapper */
static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3nms_7gpu_nms_1gpu_nms = {"gpu_nms", (PyCFunction)__pyx_pw_3nms_7gpu_nms_1gpu_nms, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_dets = 0;
PyObject *__pyx_v_thresh = 0;
__pyx_t_5numpy_int32_t __pyx_v_device_id;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("gpu_nms (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dets,&__pyx_n_s_thresh,&__pyx_n_s_device_id,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dets)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_thresh)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, 1); __PYX_ERR(0, 16, __pyx_L3_error)
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_device_id);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gpu_nms") < 0)) __PYX_ERR(0, 16, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_dets = ((PyArrayObject *)values[0]);
__pyx_v_thresh = ((PyObject*)values[1]);
if (values[2]) {
__pyx_v_device_id = __Pyx_PyInt_As_npy_int32(values[2]); if (unlikely((__pyx_v_device_id == (npy_int32)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L3_error)
} else {
__pyx_v_device_id = ((__pyx_t_5numpy_int32_t)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dets), __pyx_ptype_5numpy_ndarray, 1, "dets", 0))) __PYX_ERR(0, 16, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_thresh), (&PyFloat_Type), 1, "thresh", 1))) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_r = __pyx_pf_3nms_7gpu_nms_gpu_nms(__pyx_self, __pyx_v_dets, __pyx_v_thresh, __pyx_v_device_id);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
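/* The wrapper above unpacks and validates the Python arguments: dets must be a
 * numpy ndarray, thresh a Python float, and device_id (optional, defaulting to
 * 0) an int32; on success it dispatches to the implementation function below. */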
static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id) {
int __pyx_v_boxes_num;
int __pyx_v_boxes_dim;
int __pyx_v_num_out;
PyArrayObject *__pyx_v_keep = 0;
PyArrayObject *__pyx_v_scores = 0;
PyArrayObject *__pyx_v_order = 0;
PyArrayObject *__pyx_v_sorted_dets = 0;
__Pyx_LocalBuf_ND __pyx_pybuffernd_dets;
__Pyx_Buffer __pyx_pybuffer_dets;
__Pyx_LocalBuf_ND __pyx_pybuffernd_keep;
__Pyx_Buffer __pyx_pybuffer_keep;
__Pyx_LocalBuf_ND __pyx_pybuffernd_order;
__Pyx_Buffer __pyx_pybuffer_order;
__Pyx_LocalBuf_ND __pyx_pybuffernd_scores;
__Pyx_Buffer __pyx_pybuffer_scores;
__Pyx_LocalBuf_ND __pyx_pybuffernd_sorted_dets;
__Pyx_Buffer __pyx_pybuffer_sorted_dets;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyArrayObject *__pyx_t_6 = NULL;
PyArrayObject *__pyx_t_7 = NULL;
PyArrayObject *__pyx_t_8 = NULL;
PyArrayObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
float __pyx_t_14;
PyObject *__pyx_t_15 = NULL;
PyObject *__pyx_t_16 = NULL;
PyObject *__pyx_t_17 = NULL;
__Pyx_RefNannySetupContext("gpu_nms", 0);
__pyx_pybuffer_keep.pybuffer.buf = NULL;
__pyx_pybuffer_keep.refcount = 0;
__pyx_pybuffernd_keep.data = NULL;
__pyx_pybuffernd_keep.rcbuffer = &__pyx_pybuffer_keep;
__pyx_pybuffer_scores.pybuffer.buf = NULL;
__pyx_pybuffer_scores.refcount = 0;
__pyx_pybuffernd_scores.data = NULL;
__pyx_pybuffernd_scores.rcbuffer = &__pyx_pybuffer_scores;
__pyx_pybuffer_order.pybuffer.buf = NULL;
__pyx_pybuffer_order.refcount = 0;
__pyx_pybuffernd_order.data = NULL;
__pyx_pybuffernd_order.rcbuffer = &__pyx_pybuffer_order;
__pyx_pybuffer_sorted_dets.pybuffer.buf = NULL;
__pyx_pybuffer_sorted_dets.refcount = 0;
__pyx_pybuffernd_sorted_dets.data = NULL;
__pyx_pybuffernd_sorted_dets.rcbuffer = &__pyx_pybuffer_sorted_dets;
__pyx_pybuffer_dets.pybuffer.buf = NULL;
__pyx_pybuffer_dets.refcount = 0;
__pyx_pybuffernd_dets.data = NULL;
__pyx_pybuffernd_dets.rcbuffer = &__pyx_pybuffer_dets;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dets.rcbuffer->pybuffer, (PyObject*)__pyx_v_dets, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_pybuffernd_dets.diminfo[0].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dets.diminfo[0].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_dets.diminfo[1].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dets.diminfo[1].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[1];
/* "nms/gpu_nms.pyx":18
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh,
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0] # <<<<<<<<<<<<<<
* cdef int boxes_dim = dets.shape[1]
* cdef int num_out
*/
__pyx_v_boxes_num = (__pyx_v_dets->dimensions[0]);
/* "nms/gpu_nms.pyx":19
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
* cdef int boxes_dim = dets.shape[1] # <<<<<<<<<<<<<<
* cdef int num_out
* cdef np.ndarray[np.int32_t, ndim=1] \
*/
__pyx_v_boxes_dim = (__pyx_v_dets->dimensions[1]);
/* "nms/gpu_nms.pyx":22
* cdef int num_out
* cdef np.ndarray[np.int32_t, ndim=1] \
* keep = np.zeros(boxes_num, dtype=np.int32) # <<<<<<<<<<<<<<
* cdef np.ndarray[np.float32_t, ndim=1] \
* scores = dets[:, 4]
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_boxes_num); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 22, __pyx_L1_error)
__pyx_t_6 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_keep = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 21, __pyx_L1_error)
} else {__pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_6 = 0;
__pyx_v_keep = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "nms/gpu_nms.pyx":24
* keep = np.zeros(boxes_num, dtype=np.int32)
* cdef np.ndarray[np.float32_t, ndim=1] \
* scores = dets[:, 4] # <<<<<<<<<<<<<<
* #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn
* # order = scores.argsort()[::-1]
*/
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 24, __pyx_L1_error)
__pyx_t_7 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scores.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_scores = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_scores.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 23, __pyx_L1_error)
} else {__pyx_pybuffernd_scores.diminfo[0].strides = __pyx_pybuffernd_scores.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scores.diminfo[0].shape = __pyx_pybuffernd_scores.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_7 = 0;
__pyx_v_scores = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "nms/gpu_nms.pyx":28
* # order = scores.argsort()[::-1]
* cdef np.ndarray[np.intp_t, ndim=1] \
* order = scores.argsort()[::-1] # <<<<<<<<<<<<<<
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :]
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_scores), __pyx_n_s_argsort); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
if (__pyx_t_3) {
__pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error)
}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyObject_GetItem(__pyx_t_5, __pyx_slice__3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 28, __pyx_L1_error)
__pyx_t_8 = ((PyArrayObject *)__pyx_t_1);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_order.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_order = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_order.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 27, __pyx_L1_error)
} else {__pyx_pybuffernd_order.diminfo[0].strides = __pyx_pybuffernd_order.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_order.diminfo[0].shape = __pyx_pybuffernd_order.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_8 = 0;
__pyx_v_order = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "nms/gpu_nms.pyx":30
* order = scores.argsort()[::-1]
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :] # <<<<<<<<<<<<<<
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
* keep = keep[:num_out]
*/
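/* `sorted_dets = dets[order, :]` is built by packing order and the full-row slice
 * constant __pyx_slice__4 into a tuple and fancy-indexing dets; the rows reordered
 * by descending score are then exposed as a 2-D float32 buffer for the _nms call. */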
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)__pyx_v_order));
__Pyx_GIVEREF(((PyObject *)__pyx_v_order));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_order));
__Pyx_INCREF(__pyx_slice__4);
__Pyx_GIVEREF(__pyx_slice__4);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_slice__4);
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 30, __pyx_L1_error)
__pyx_t_9 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
__pyx_v_sorted_dets = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 29, __pyx_L1_error)
} else {__pyx_pybuffernd_sorted_dets.diminfo[0].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sorted_dets.diminfo[0].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sorted_dets.diminfo[1].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sorted_dets.diminfo[1].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[1];
}
}
__pyx_t_9 = 0;
__pyx_v_sorted_dets = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "nms/gpu_nms.pyx":31
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :]
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) # <<<<<<<<<<<<<<
* keep = keep[:num_out]
* return list(order[keep])
*/
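/* Preparation for the _nms call: the [0] and [0, 0] indices are bounds-checked
 * against the keep and sorted_dets buffer shapes, thresh is converted to a C float,
 * and raw strided pointers into both buffers are handed to the GPU _nms routine,
 * which writes the kept indices into keep and their count into num_out. */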
__pyx_t_10 = 0;
__pyx_t_11 = -1;
if (__pyx_t_10 < 0) {
__pyx_t_10 += __pyx_pybuffernd_keep.diminfo[0].shape;
if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 0;
} else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_keep.diminfo[0].shape)) __pyx_t_11 = 0;
if (unlikely(__pyx_t_11 != -1)) {
__Pyx_RaiseBufferIndexError(__pyx_t_11);
__PYX_ERR(0, 31, __pyx_L1_error)
}
__pyx_t_12 = 0;
__pyx_t_13 = 0;
__pyx_t_11 = -1;
if (__pyx_t_12 < 0) {
__pyx_t_12 += __pyx_pybuffernd_sorted_dets.diminfo[0].shape;
if (unlikely(__pyx_t_12 < 0)) __pyx_t_11 = 0;
} else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_sorted_dets.diminfo[0].shape)) __pyx_t_11 = 0;
if (__pyx_t_13 < 0) {
__pyx_t_13 += __pyx_pybuffernd_sorted_dets.diminfo[1].shape;
if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1;
} else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_sorted_dets.diminfo[1].shape)) __pyx_t_11 = 1;
if (unlikely(__pyx_t_11 != -1)) {
__Pyx_RaiseBufferIndexError(__pyx_t_11);
__PYX_ERR(0, 31, __pyx_L1_error)
}
__pyx_t_14 = __pyx_PyFloat_AsFloat(__pyx_v_thresh); if (unlikely((__pyx_t_14 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 31, __pyx_L1_error)
_nms((&(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int32_t *, __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_keep.diminfo[0].strides))), (&__pyx_v_num_out), (&(*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_sorted_dets.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_sorted_dets.diminfo[1].strides))), __pyx_v_boxes_num, __pyx_v_boxes_dim, __pyx_t_14, __pyx_v_device_id);
/* "nms/gpu_nms.pyx":32
* sorted_dets = dets[order, :]
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
* keep = keep[:num_out] # <<<<<<<<<<<<<<
* return list(order[keep])
*/
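/* keep is re-sliced to its first num_out entries; the buffer view is re-acquired on
 * the new array, with PyErr_Fetch/PyErr_Restore used to fall back to the previous
 * buffer (raising a buffer-fallback error) if re-acquisition fails. */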
__pyx_t_5 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_v_keep), 0, __pyx_v_num_out, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 32, __pyx_L1_error)
__pyx_t_6 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer);
__pyx_t_11 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_11 < 0)) {
PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_v_keep, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17);
}
}
__pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 32, __pyx_L1_error)
}
__pyx_t_6 = 0;
__Pyx_DECREF_SET(__pyx_v_keep, ((PyArrayObject *)__pyx_t_5));
__pyx_t_5 = 0;
/* "nms/gpu_nms.pyx":33
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
* keep = keep[:num_out]
* return list(order[keep]) # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_order), ((PyObject *)__pyx_v_keep)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PySequence_List(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "nms/gpu_nms.pyx":16
* void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
*
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<<
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_keep);
__Pyx_XDECREF((PyObject *)__pyx_v_scores);
__Pyx_XDECREF((PyObject *)__pyx_v_order);
__Pyx_XDECREF((PyObject *)__pyx_v_sorted_dets);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
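/* Illustrative usage sketch (not part of the generated code): at the Python level
 * the wrapper above corresponds to the gpu_nms() signature quoted from the .pyx
 * source, e.g.
 *
 *     import numpy as np
 *     from nms.gpu_nms import gpu_nms
 *     # columns: x1, y1, x2, y2, score -- the score column index (4) is taken from
 *     # the `scores = dets[:, 4]` line quoted above
 *     dets = np.array([[0, 0, 10, 10, 0.9],
 *                      [1, 1, 10, 10, 0.8]], dtype=np.float32)
 *     keep = gpu_nms(dets, 0.7, device_id=0)  # -> list of row indices to keep
 */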
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fulfill the PEP.
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":203
* # of flags
*
* if info == NULL: return # <<<<<<<<<<<<<<
*
* cdef int copy_shape, i, ndim
*/
__pyx_t_1 = ((__pyx_v_info == NULL) != 0);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":206
*
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":207
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":209
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":212
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* copy_shape = 1 # <<<<<<<<<<<<<<
* else:
* copy_shape = 0
*/
__pyx_v_copy_shape = 1;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
goto __pyx_L4;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":214
* copy_shape = 1
* else:
* copy_shape = 0 # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
/*else*/ {
__pyx_v_copy_shape = 0;
}
__pyx_L4:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L6_bool_binop_done;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":217
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L6_bool_binop_done:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 218, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L9_bool_binop_done;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":221
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L9_bool_binop_done:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 222, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":224
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if copy_shape:
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":225
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if copy_shape:
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
__pyx_t_1 = (__pyx_v_copy_shape != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":229
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
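/* copy_shape path: npy_intp and Py_ssize_t differ in size here, so the shape and
 * strides are copied element-by-element into a single malloc'd block (strides
 * first, shape directly after); __releasebuffer__ later frees this block through
 * free(info.strides). */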
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":230
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":231
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_4 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":232
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":233
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
goto __pyx_L11;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":235
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
/*else*/ {
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":236
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L11:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":237
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":238
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":239
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":242
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef int offset
*/
__pyx_v_f = NULL;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":243
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef int offset
*
*/
__pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":246
* cdef int offset
*
* cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
*
* if not hasfields and not copy_shape:
*/
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
__pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L15_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L15_bool_binop_done:;
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":250
* if not hasfields and not copy_shape:
* # do not call releasebuffer
* info.obj = None # <<<<<<<<<<<<<<
* else:
* # need to call releasebuffer
*/
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
goto __pyx_L14;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":253
* else:
* # need to call releasebuffer
* info.obj = self # <<<<<<<<<<<<<<
*
* if not hasfields:
*/
/*else*/ {
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
}
__pyx_L14:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
__pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":256
*
* if not hasfields:
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
*/
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L20_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_L20_next_or:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":258
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 259, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":260
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
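/* The switch below maps the NumPy type number to its buffer-protocol format
 * character: integer types to "b"/"B"/"h"/"H"/"i"/"I"/"l"/"L"/"q"/"Q", floating
 * types to "f"/"d"/"g", complex types to "Zf"/"Zd"/"Zg", and NPY_OBJECT to "O";
 * any other type number raises ValueError. */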
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = ((char *)"b");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":261
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
case NPY_UBYTE:
__pyx_v_f = ((char *)"B");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":262
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
case NPY_SHORT:
__pyx_v_f = ((char *)"h");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":263
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
case NPY_USHORT:
__pyx_v_f = ((char *)"H");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":264
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
case NPY_INT:
__pyx_v_f = ((char *)"i");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":265
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
case NPY_UINT:
__pyx_v_f = ((char *)"I");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":266
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
case NPY_LONG:
__pyx_v_f = ((char *)"l");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":267
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
case NPY_ULONG:
__pyx_v_f = ((char *)"L");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":268
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
case NPY_LONGLONG:
__pyx_v_f = ((char *)"q");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":269
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
case NPY_ULONGLONG:
__pyx_v_f = ((char *)"Q");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":270
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
case NPY_FLOAT:
__pyx_v_f = ((char *)"f");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":271
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
case NPY_DOUBLE:
__pyx_v_f = ((char *)"d");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":272
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
case NPY_LONGDOUBLE:
__pyx_v_f = ((char *)"g");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":273
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
case NPY_CFLOAT:
__pyx_v_f = ((char *)"Zf");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":274
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
case NPY_CDOUBLE:
__pyx_v_f = ((char *)"Zd");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":275
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
case NPY_CLONGDOUBLE:
__pyx_v_f = ((char *)"Zg");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":276
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
case NPY_OBJECT:
__pyx_v_f = ((char *)"O");
break;
default:
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":278
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 278, __pyx_L1_error)
break;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":279
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":280
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":282
* return
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
*/
/*else*/ {
__pyx_v_info->format = ((char *)malloc(0xFF));
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":283
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":284
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":285
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
* f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
* info.format + _buffer_format_string_len,
* &offset)
*/
__pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error)
__pyx_v_f = __pyx_t_7;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":288
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = '\x00';
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fulfill the PEP.
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* Python wrapper */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":292
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format) # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides)
*/
free(__pyx_v_info->format);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":294
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides) # <<<<<<<<<<<<<<
* # info.shape was stored after info.strides in the same block
*
*/
free(__pyx_v_info->strides);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":771
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":774
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":777
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":780
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":783
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
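/* Per the quoted .pxd comment, _util_dtypestring walks the fields of a structured
 * dtype and appends their format codes to the buffer format string, emitting pad
 * bytes for gaps; it raises RuntimeError when the 255-byte format buffer
 * (allocated in __getbuffer__ above) would be overrun. */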
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":790
*
* cdef dtype child
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":791
* cdef dtype child
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(1, 794, __pyx_L1_error)
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":795
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 795, __pyx_L1_error)
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":796
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 796, __pyx_L1_error)
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error)
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (__pyx_t_6) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 799, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":802
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (__pyx_t_6) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 803, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":813
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":814
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 0x78;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":815
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":816
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":818
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":821
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (__pyx_t_6) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 823, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":826
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":827
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":828
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x68;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":829
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":830
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x69;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":831
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":832
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x6C;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":833
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":834
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x71;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":835
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":836
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x66;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":837
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x64;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":838
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x67;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":839
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x66;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":840
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x64;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":841
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x67;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":842
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":844
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
/*else*/ {
__pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 844, __pyx_L1_error)
}
__pyx_L15:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":845
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
goto __pyx_L13;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":849
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
/*else*/ {
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":850
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
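/*
 * Note on the function above: _util_dtypestring walks a (possibly nested)
 * structured NumPy dtype and writes the corresponding struct-module format
 * characters into the caller-supplied buffer f, emitting 'x' pad bytes to
 * honour field offsets and recursing into sub-dtypes. It is used by
 * ndarray.__getbuffer__ to build the Py_buffer format string.
 */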
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
PyObject *__pyx_v_baseptr;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
__pyx_t_1 = (__pyx_v_base == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":969
* cdef PyObject* baseptr
* if base is None:
* baseptr = NULL # <<<<<<<<<<<<<<
* else:
* Py_INCREF(base) # important to do this before decref below!
*/
__pyx_v_baseptr = NULL;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
goto __pyx_L3;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":971
* baseptr = NULL
* else:
* Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<<
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
*/
/*else*/ {
Py_INCREF(__pyx_v_base);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":972
* else:
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base # <<<<<<<<<<<<<<
* Py_XDECREF(arr.base)
* arr.base = baseptr
*/
__pyx_v_baseptr = ((PyObject *)__pyx_v_base);
}
__pyx_L3:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":973
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base) # <<<<<<<<<<<<<<
* arr.base = baseptr
*
*/
Py_XDECREF(__pyx_v_arr->base);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":974
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
* arr.base = baseptr # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
__pyx_v_arr->base = __pyx_v_baseptr;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
__pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":978
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL:
* return None # <<<<<<<<<<<<<<
* else:
* return <object>arr.base
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":980
* return None
* else:
* return <object>arr.base # <<<<<<<<<<<<<<
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
__pyx_r = ((PyObject *)__pyx_v_arr->base);
goto __pyx_L0;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
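/*
 * Module-level tables follow: the (empty) method table, the Python 3
 * module definition, and the interned-string table listing every string
 * constant used by nms.gpu_nms and the embedded numpy buffer support.
 * gpu_nms itself is registered directly in the module dict during init,
 * so the method table below stays empty.
 */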
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
#if PY_VERSION_HEX < 0x03020000
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
#else
PyModuleDef_HEAD_INIT,
#endif
"gpu_nms",
0, /* m_doc */
-1, /* m_size */
__pyx_methods /* m_methods */,
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_k_D_v_zix_caffe_caffe_win_20160523, sizeof(__pyx_k_D_v_zix_caffe_caffe_win_20160523), 0, 0, 1, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
{&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
{&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1},
{&__pyx_n_s_boxes_dim, __pyx_k_boxes_dim, sizeof(__pyx_k_boxes_dim), 0, 0, 1, 1},
{&__pyx_n_s_boxes_num, __pyx_k_boxes_num, sizeof(__pyx_k_boxes_num), 0, 0, 1, 1},
{&__pyx_n_s_dets, __pyx_k_dets, sizeof(__pyx_k_dets), 0, 0, 1, 1},
{&__pyx_n_s_device_id, __pyx_k_device_id, sizeof(__pyx_k_device_id), 0, 0, 1, 1},
{&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
{&__pyx_n_s_gpu_nms, __pyx_k_gpu_nms, sizeof(__pyx_k_gpu_nms), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1},
{&__pyx_n_s_keep, __pyx_k_keep, sizeof(__pyx_k_keep), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
{&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
{&__pyx_n_s_nms_gpu_nms, __pyx_k_nms_gpu_nms, sizeof(__pyx_k_nms_gpu_nms), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_num_out, __pyx_k_num_out, sizeof(__pyx_k_num_out), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_scores, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1},
{&__pyx_n_s_sorted_dets, __pyx_k_sorted_dets, sizeof(__pyx_k_sorted_dets), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_thresh, __pyx_k_thresh, sizeof(__pyx_k_thresh), 0, 0, 1, 1},
{&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
{&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
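/* __Pyx_InitCachedBuiltins caches the builtins (ValueError, range,
   RuntimeError) that the buffer-protocol helpers raise through. */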
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 231, __pyx_L1_error)
__pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
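/* __Pyx_InitCachedConstants pre-builds the constant objects used at runtime:
   the slices behind dets[:, 4], scores.argsort()[::-1] and dets[order, :],
   the argument tuples for the buffer-format error messages, and the
   argument-name tuple plus code object for the gpu_nms function. */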
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "nms/gpu_nms.pyx":24
* keep = np.zeros(boxes_num, dtype=np.int32)
* cdef np.ndarray[np.float32_t, ndim=1] \
* scores = dets[:, 4] # <<<<<<<<<<<<<<
* #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn
* # order = scores.argsort()[::-1]
*/
__pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice_);
__Pyx_GIVEREF(__pyx_slice_);
__pyx_tuple__2 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_4); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "nms/gpu_nms.pyx":28
* # order = scores.argsort()[::-1]
* cdef np.ndarray[np.intp_t, ndim=1] \
* order = scores.argsort()[::-1] # <<<<<<<<<<<<<<
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :]
*/
__pyx_slice__3 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
/* "nms/gpu_nms.pyx":30
* order = scores.argsort()[::-1]
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :] # <<<<<<<<<<<<<<
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
* keep = keep[:num_out]
*/
__pyx_slice__4 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__4)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__4);
__Pyx_GIVEREF(__pyx_slice__4);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 218, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 799, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 803, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 823, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "nms/gpu_nms.pyx":16
* void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
*
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<<
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
*/
__pyx_tuple__11 = PyTuple_Pack(10, __pyx_n_s_dets, __pyx_n_s_thresh, __pyx_n_s_device_id, __pyx_n_s_boxes_num, __pyx_n_s_boxes_dim, __pyx_n_s_num_out, __pyx_n_s_keep, __pyx_n_s_scores, __pyx_n_s_order, __pyx_n_s_sorted_dets); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
__pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(3, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_n_s_gpu_nms, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
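/* __Pyx_InitGlobals interns the string table and creates the small integer
   constants 4 and -1 used by the cached slices above. */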
static int __Pyx_InitGlobals(void) {
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
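/*
 * Module initialisation. The entry point below creates the nms.gpu_nms
 * module object, initialises the cached builtins/constants, imports the
 * numpy C-API types (dtype, flatiter, broadcast, ndarray, ufunc), and then
 * executes the module-level statements of gpu_nms.pyx: "import numpy as
 * np", the sizeof(int) == sizeof(np.int32_t) assertion, and registration
 * of the gpu_nms function in the module dictionary.
 */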
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initgpu_nms(void); /*proto*/
PyMODINIT_FUNC initgpu_nms(void)
#else
PyMODINIT_FUNC PyInit_gpu_nms(void); /*proto*/
PyMODINIT_FUNC PyInit_gpu_nms(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_gpu_nms(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("gpu_nms", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_nms__gpu_nms) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "nms.gpu_nms")) {
if (unlikely(PyDict_SetItemString(modules, "nms.gpu_nms", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global init code ---*/
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/*--- Type import code ---*/
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type",
#if CYTHON_COMPILING_IN_PYPY
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error)
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error)
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "nms/gpu_nms.pyx":8
* # --------------------------------------------------------
*
* import numpy as np # <<<<<<<<<<<<<<
* cimport numpy as np
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "nms/gpu_nms.pyx":11
* cimport numpy as np
*
* assert sizeof(int) == sizeof(np.int32_t) # <<<<<<<<<<<<<<
*
* cdef extern from "gpu_nms.hpp":
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!(((sizeof(int)) == (sizeof(__pyx_t_5numpy_int32_t))) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 11, __pyx_L1_error)
}
}
#endif
/* "nms/gpu_nms.pyx":16
* void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
*
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<<
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3nms_7gpu_nms_1gpu_nms, NULL, __pyx_n_s_nms_gpu_nms); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_gpu_nms, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "nms/gpu_nms.pyx":1
* # -------------------------------------------------------- # <<<<<<<<<<<<<<
* # Faster R-CNN
* # Copyright (c) 2015 Microsoft
*/
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init nms.gpu_nms");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
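/*
 * Illustrative usage of the compiled extension from Python (a sketch only,
 * not part of the generated module; it assumes the extension has been
 * built and is importable as nms.gpu_nms, as set up in the init code
 * above):
 *
 *     import numpy as np
 *     from nms.gpu_nms import gpu_nms
 *
 *     dets = np.array([[0, 0, 10, 10, 0.9],
 *                      [1, 1, 11, 11, 0.8]], dtype=np.float32)
 *     keep = gpu_nms(dets, 0.5, device_id=0)   # indices of kept boxes
 *
 * dets must be a C-contiguous 2-D float32 array whose fifth column holds
 * the detection scores (scores = dets[:, 4] in gpu_nms.pyx); the remaining
 * columns are the box coordinates passed through to the CUDA _nms kernel.
 */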
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* ArgTypeTest */
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
}
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (none_allowed && obj == Py_None) return 1;
else if (exact) {
if (likely(Py_TYPE(obj) == type)) return 1;
#if PY_MAJOR_VERSION == 2
else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(PyObject_TypeCheck(obj, type))) return 1;
}
__Pyx_RaiseArgumentTypeInvalid(name, obj, type);
return 0;
}
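/*
 * Buffer-format checking machinery. The helpers below parse the
 * struct-module style format string exposed by a buffer provider and
 * verify that it matches the dtype the Cython code was compiled against
 * (float32 for dets/sorted_dets/scores, intp for order, int32 for keep).
 */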
/* BufferFormatCheck */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
unsigned int n = 1;
return *(unsigned char*)(&n) != 0;
}
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
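/* __Pyx_BufFmt_ProcessTypeChunk consumes the run of identical format
   characters accumulated in ctx (enc_type/enc_count), checks their size,
   alignment and offset against the expected struct fields, and advances
   ctx->head through nested fields as needed. */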
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
      case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace between dimensions */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
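/* __Pyx_BufFmt_CheckString walks a struct-module / PEP 3118 style format
   string: byte-order marks ('<', '>', '!', '=', '@', '^'), 'T{...}' nested
   structs, 'x' padding bytes, a 'Z' prefix for complex types, ':name:' field
   labels, and '(d1,d2,...)' array dimensions with repeat counts.  Each parsed
   chunk is checked against the expected dtype layout; the function returns
   the position just past the parsed struct, or NULL with an exception set
   on mismatch. */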
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
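/* __Pyx_GetBufferAndValidate acquires a Py_buffer from obj and validates it:
   the number of dimensions must equal nd, the format string must match the
   expected dtype (unless cast is nonzero), and the itemsize must equal the
   dtype size.  On success it returns 0; on failure it zeroes the buffer and
   returns -1 with an exception set.  Py_None and NULL yield an empty,
   zeroed buffer. */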
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
if (obj == Py_None || obj == NULL) {
__Pyx_ZeroBuffer(buf);
return 0;
}
buf->buf = NULL;
if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
if (buf->ndim != nd) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned)buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_ZeroBuffer(buf);
return -1;
}
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (info->buf == NULL) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* GetModuleGlobalName */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON
result = PyDict_GetItem(__pyx_d, name);
if (likely(result)) {
Py_INCREF(result);
} else {
#else
result = PyObject_GetItem(__pyx_d, name);
if (!result) {
PyErr_Clear();
#endif
result = __Pyx_GetBuiltinName(name);
}
return result;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(PyObject_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
if (likely(PyCFunction_Check(func))) {
#endif
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
if (likely(PyCFunction_Check(func))) {
#endif
if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
return __Pyx_PyObject_CallMethO(func, NULL);
}
}
return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* BufferIndexError */
static void __Pyx_RaiseBufferIndexError(int axis) {
PyErr_Format(PyExc_IndexError,
"Out of bounds on buffer access (axis %d)", axis);
}
/* SliceObject */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
#if CYTHON_COMPILING_IN_CPYTHON
PyMappingMethods* mp;
#if PY_MAJOR_VERSION < 3
PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
if (likely(ms && ms->sq_slice)) {
if (!has_cstart) {
if (_py_start && (*_py_start != Py_None)) {
cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
} else
cstart = 0;
}
if (!has_cstop) {
if (_py_stop && (*_py_stop != Py_None)) {
cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
} else
cstop = PY_SSIZE_T_MAX;
}
if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
Py_ssize_t l = ms->sq_length(obj);
if (likely(l >= 0)) {
if (cstop < 0) {
cstop += l;
if (cstop < 0) cstop = 0;
}
if (cstart < 0) {
cstart += l;
if (cstart < 0) cstart = 0;
}
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
goto bad;
PyErr_Clear();
}
}
return ms->sq_slice(obj, cstart, cstop);
}
#endif
mp = Py_TYPE(obj)->tp_as_mapping;
if (likely(mp && mp->mp_subscript))
#endif
{
PyObject* result;
PyObject *py_slice, *py_start, *py_stop;
if (_py_slice) {
py_slice = *_py_slice;
} else {
PyObject* owned_start = NULL;
PyObject* owned_stop = NULL;
if (_py_start) {
py_start = *_py_start;
} else {
if (has_cstart) {
owned_start = py_start = PyInt_FromSsize_t(cstart);
if (unlikely(!py_start)) goto bad;
} else
py_start = Py_None;
}
if (_py_stop) {
py_stop = *_py_stop;
} else {
if (has_cstop) {
owned_stop = py_stop = PyInt_FromSsize_t(cstop);
if (unlikely(!py_stop)) {
Py_XDECREF(owned_start);
goto bad;
}
} else
py_stop = Py_None;
}
py_slice = PySlice_New(py_start, py_stop, Py_None);
Py_XDECREF(owned_start);
Py_XDECREF(owned_stop);
if (unlikely(!py_slice)) goto bad;
}
#if CYTHON_COMPILING_IN_CPYTHON
result = mp->mp_subscript(obj, py_slice);
#else
result = PyObject_GetItem(obj, py_slice);
#endif
if (!_py_slice) {
Py_DECREF(py_slice);
}
return result;
}
PyErr_Format(PyExc_TypeError,
"'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
bad:
return NULL;
}
/* BufferFallbackError */
static void __Pyx_RaiseBufferFallbackError(void) {
PyErr_SetString(PyExc_ValueError,
"Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
}
/* PyErrFetchRestore */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
#if PY_VERSION_HEX >= 0x03030000
if (cause) {
#else
if (cause && cause != Py_None) {
#endif
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = PyThreadState_GET();
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_VERSION_HEX < 0x03030000
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(1);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
#endif
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_VERSION_HEX < 0x03030000
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* CodeObjectCache */
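/* The code object cache keeps a sorted array of (code_line, PyCodeObject*)
   entries so that repeated tracebacks through the same generated line reuse
   a single code object.  __pyx_bisect_code_objects performs a binary search
   for the lookup/insertion point; the entry array is grown in blocks of 64. */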
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
py_code = __pyx_find_code_object(c_line ? c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? c_line : py_line, py_code);
}
py_frame = PyFrame_New(
PyThreadState_GET(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
py_frame->f_lineno = py_line;
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; }
Py_DECREF(obj);
view->obj = NULL;
}
#endif
/* CIntFromPyVerify */
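/* __PYX__VERIFY_RETURN_INT casts a value obtained as func_type down to
   target_type, but only after checking that the value survives the
   round-trip; otherwise it jumps to the raise_overflow / raise_neg_overflow
   labels that every __Pyx_PyInt_As_* function defines.  The _EXC variant
   additionally propagates an error already signalled by the conversion call
   returning (func_type)-1. */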
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* None */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* None */
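/* When C99 _Complex / C++ std::complex support is unavailable
   (CYTHON_CCOMPLEX is 0), the helpers below implement float complex
   arithmetic on a plain {real, imag} struct.  __Pyx_c_powf special-cases
   small integer exponents (0..4, with negative exponents handled via the
   reciprocal) and falls back to the polar form for everything else.  The
   corresponding double-precision versions follow further down. */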
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
            z = __Pyx_c_prodf(a, a);
            return z;
case 3:
z = __Pyx_c_prodf(a, a);
return __Pyx_c_prodf(z, a);
case 4:
z = __Pyx_c_prodf(a, a);
return __Pyx_c_prodf(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
}
r = a.real;
theta = 0;
} else {
r = __Pyx_c_absf(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* None */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* None */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
            z = __Pyx_c_prod(a, a);
            return z;
case 3:
z = __Pyx_c_prod(a, a);
return __Pyx_c_prod(z, a);
case 4:
z = __Pyx_c_prod(a, a);
return __Pyx_c_prod(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
}
r = a.real;
theta = 0;
} else {
r = __Pyx_c_abs(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(enum NPY_TYPES) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
}
} else {
if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
little, !is_unsigned);
}
}
/* CIntFromPy */
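/* The __Pyx_PyInt_As_* conversions share one pattern: a fast path that reads
   small PyLong values directly from their digit array when
   CYTHON_USE_PYLONG_INTERNALS is enabled, a PyLong_As(Unsigned)Long(Long)
   path for values that fit a native long / long long, and finally
   _PyLong_AsByteArray for anything wider, raising OverflowError when the
   value cannot be represented in the target type. */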
static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) {
const npy_int32 neg_one = (npy_int32) -1, const_zero = (npy_int32) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(npy_int32) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (npy_int32) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (npy_int32) 0;
case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, digits[0])
case 2:
if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) >= 2 * PyLong_SHIFT) {
return (npy_int32) (((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) >= 3 * PyLong_SHIFT) {
return (npy_int32) (((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) >= 4 * PyLong_SHIFT) {
return (npy_int32) (((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (npy_int32) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(npy_int32) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (npy_int32) 0;
case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, +digits[0])
case -2:
if (8 * sizeof(npy_int32) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) {
return (npy_int32) (((npy_int32)-1)*(((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) {
return (npy_int32) ((((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) {
return (npy_int32) (((npy_int32)-1)*(((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) {
return (npy_int32) ((((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) {
return (npy_int32) (((npy_int32)-1)*(((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) {
return (npy_int32) ((((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
}
#endif
if (sizeof(npy_int32) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(npy_int32, long, PyLong_AsLong(x))
} else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
npy_int32 val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (npy_int32) -1;
}
} else {
npy_int32 val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (npy_int32) -1;
val = __Pyx_PyInt_As_npy_int32(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to npy_int32");
return (npy_int32) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to npy_int32");
return (npy_int32) -1;
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* ModuleImport */
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = __Pyx_PyIdentifier_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
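/* __Pyx_ImportType fetches module_name.class_name, verifies that it is a
   type object and compares its tp_basicsize against the size this module
   was compiled with: a larger runtime size only triggers a warning when
   strict is 0, while any other mismatch raises ValueError, since it
   indicates a binary-incompatible rebuild of the imported extension type. */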
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
size_t size, int strict)
{
PyObject *py_module = 0;
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
py_name = __Pyx_PyIdentifier_FromString(class_name);
if (!py_name)
goto bad;
result = PyObject_GetAttr(py_module, py_name);
Py_DECREF(py_name);
py_name = 0;
Py_DECREF(py_module);
py_module = 0;
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if (!strict && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd",
module_name, class_name, basicsize, size);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
else if ((size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd",
module_name, class_name, basicsize, size);
goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(py_module);
Py_XDECREF(result);
return NULL;
}
#endif
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
#else
if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (PyUnicode_IS_ASCII(o)) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
PyNumberMethods *m;
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (PyInt_Check(x) || PyLong_Check(x))
#else
if (PyLong_Check(x))
#endif
return __Pyx_NewRef(x);
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = PyNumber_Int(x);
}
else if (m && m->nb_long) {
name = "long";
res = PyNumber_Long(x);
}
#else
if (m && m->nb_int) {
name = "int";
res = PyNumber_Long(x);
}
#endif
if (res) {
#if PY_MAJOR_VERSION < 3
if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
if (!PyLong_Check(res)) {
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
name, name, Py_TYPE(res)->tp_name);
Py_DECREF(res);
return NULL;
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
      return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */ | ff52fe9f4494146019621687cc532b7bffe5a92f.cu | // ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
//#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
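// devIoU: intersection-over-union of two boxes laid out as [x1, y1, x2, y2, score].
// Only the first four values are read; the overlap width/height are clamped at zero
// so disjoint boxes produce an IoU of 0.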
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
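// nms_kernel: threadsPerBlock is 64 (the bits in an unsigned long long), so each
// block compares one 64-box "row" tile (blockIdx.y) against one 64-box "column"
// tile (blockIdx.x). Every thread owns a single row box and emits one 64-bit word
// whose set bits mark the column boxes it overlaps above nms_overlap_thresh;
// together the words form the n_boxes x col_blocks suppression bitmask that the
// host walks afterwards.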
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
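  // Stage the column tile's boxes into shared memory: each thread copies the five
  // floats of one box so all row threads in the block can reuse them.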
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
  CUDA_CHECK(cudaGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
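// _nms: host-side entry point. Copies the detections to the selected device,
// launches nms_kernel over a col_blocks x col_blocks grid to build the overlap
// bitmask, copies the mask back, and performs the greedy suppression pass on the
// CPU. keep_out receives the indices of the surviving boxes and num_out their
// count; callers are expected to pass boxes sorted by descending score (as the
// Cython wrapper below does) so the greedy pass keeps the highest-scoring box of
// each overlapping group.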
void _nms(long* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(cudaMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(cudaMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
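  // Launch one block per (row tile, column tile) pair; blockIdx.y selects the row
  // tile and blockIdx.x the column tile inside nms_kernel.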
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
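  // Greedy pass: visit boxes in score order; a box is kept only if no previously
  // kept box has set its bit in remv, and each kept box then ORs its own overlap
  // mask into remv to suppress the boxes it dominates.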
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
}
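// Illustrative sketch (not part of the generated module) of driving _nms directly
// from C++. The per-row layout [x1, y1, x2, y2, score] and the 0.3f threshold are
// assumptions chosen for the example:
//
//   std::vector<float> boxes = /* n rows of 5 floats, sorted by descending score */;
//   int n = (int)(boxes.size() / 5);
//   std::vector<long> keep(n);
//   int num_out = 0;
//   _nms(keep.data(), &num_out, boxes.data(), n, 5, 0.3f, 0 /* device_id */);
//   keep.resize(num_out);  // indices of the detections that survive NMS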
/* Generated by Cython 0.24 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_24"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifndef __cplusplus
#error "Cython files generated with the C++ option must be compiled with a C++ compiler."
#endif
#ifndef CYTHON_INLINE
#define CYTHON_INLINE inline
#endif
template<typename T>
void __Pyx_call_destructor(T& x) {
x.~T();
}
template<typename T>
class __Pyx_FakeReference {
public:
__Pyx_FakeReference() : ptr(NULL) { }
__Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
T *operator->() { return ptr; }
operator T&() { return *ptr; }
private:
T *ptr;
};
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__nms__gpu_nms
#define __PYX_HAVE_API__nms__gpu_nms
#include "string.h"
#include "stdio.h"
#include "stdlib.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "gpu_nms.hpp"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER) && defined (_M_X64)
#define __Pyx_sst_abs(value) _abs64(value)
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_COMPILING_IN_CPYTHON
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
  __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);  /* +1 for the NUL terminator written by strcpy below */
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* None.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"nms\\gpu_nms.pyx",
"__init__.pxd",
"type.pxd",
};
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":725
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":726
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":727
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":728
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":732
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":733
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":734
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":735
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":739
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":740
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":749
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":750
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":751
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":753
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":754
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":755
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":757
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":758
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":760
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":761
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":762
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* None.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
/* None.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
/*--- Type declarations ---*/
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":764
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":765
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":766
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":768
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact);
/* BufferFormatCheck.proto */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type); // PROTO
/* PyObjectGetAttrStr.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* BufferIndexError.proto */
static void __Pyx_RaiseBufferIndexError(int axis);
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
/* SliceObject.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
int has_cstart, int has_cstop, int wraparound);
/* BufferFallbackError.proto */
static void __Pyx_RaiseBufferFallbackError(void);
/* PyThreadStateGet.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET();
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (!PyErr_Occurred()) {
PyObject* args = PyTuple_Pack(1, key);
if (likely(args))
PyErr_SetObject(PyExc_KeyError, args);
Py_XDECREF(args);
}
return NULL;
}
Py_INCREF(value);
return value;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* None.proto */
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* None.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* None.proto */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* None.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eqf(a, b) ((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* None.proto */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/* None.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
/* CIntFromPy.proto */
static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* PyIdentifierFromString.proto */
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
/* ModuleImport.proto */
static PyObject *__Pyx_ImportModule(const char *name);
/* TypeImport.proto */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'nms.gpu_nms' */
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t = { "float32_t", NULL, sizeof(__pyx_t_5numpy_float32_t), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t = { "int32_t", NULL, sizeof(__pyx_t_5numpy_int32_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int32_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int32_t), 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t = { "intp_t", NULL, sizeof(__pyx_t_5numpy_intp_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_intp_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_intp_t), 0 };
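/* Buffer descriptors for the typed numpy arguments used by the gpu_nms wrapper
   below: float32 for the (N, 5) detections array, int32 for the keep-index buffer,
   and intp for the argsort ordering of the scores. */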
#define __Pyx_MODULE_NAME "nms.gpu_nms"
int __pyx_module_is_main_nms__gpu_nms = 0;
/* Implementation of 'nms.gpu_nms' */
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_RuntimeError;
static const char __pyx_k_np[] = "np";
static const char __pyx_k_dets[] = "dets";
static const char __pyx_k_keep[] = "keep";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_int32[] = "int32";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_order[] = "order";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_scores[] = "scores";
static const char __pyx_k_thresh[] = "thresh";
static const char __pyx_k_argsort[] = "argsort";
static const char __pyx_k_gpu_nms[] = "gpu_nms";
static const char __pyx_k_num_out[] = "num_out";
static const char __pyx_k_boxes_dim[] = "boxes_dim";
static const char __pyx_k_boxes_num[] = "boxes_num";
static const char __pyx_k_device_id[] = "device_id";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_nms_gpu_nms[] = "nms.gpu_nms";
static const char __pyx_k_sorted_dets[] = "sorted_dets";
static const char __pyx_k_RuntimeError[] = "RuntimeError";
static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static const char __pyx_k_D_v_zix_caffe_caffe_win_20160523[] = "D:\\v-zix\\caffe\\caffe-win-20160523\\models\\py-faster-rcnn-windows\\lib\\nms\\gpu_nms.pyx";
static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
static PyObject *__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_argsort;
static PyObject *__pyx_n_s_boxes_dim;
static PyObject *__pyx_n_s_boxes_num;
static PyObject *__pyx_n_s_dets;
static PyObject *__pyx_n_s_device_id;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_gpu_nms;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_int32;
static PyObject *__pyx_n_s_keep;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_nms_gpu_nms;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_num_out;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_order;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_scores;
static PyObject *__pyx_n_s_sorted_dets;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_thresh;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static PyObject *__pyx_int_4;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_slice_;
static PyObject *__pyx_slice__3;
static PyObject *__pyx_slice__4;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_codeobj__12;
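/* Note: the interned names and cached constants declared above (including the
 * slice objects __pyx_slice_, __pyx_slice__3, __pyx_slice__4 and the tuples
 * built from them) are created once during module initialisation and then
 * reused by gpu_nms() below for the `dets[:, 4]`, `[::-1]` and
 * `dets[order, :]` indexing operations quoted in the embedded .pyx comments. */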
/* "nms/gpu_nms.pyx":16
* void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
*
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<<
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
*/
/* Python wrapper */
static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3nms_7gpu_nms_1gpu_nms = {"gpu_nms", (PyCFunction)__pyx_pw_3nms_7gpu_nms_1gpu_nms, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_dets = 0;
PyObject *__pyx_v_thresh = 0;
__pyx_t_5numpy_int32_t __pyx_v_device_id;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("gpu_nms (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dets,&__pyx_n_s_thresh,&__pyx_n_s_device_id,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dets)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_thresh)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, 1); __PYX_ERR(0, 16, __pyx_L3_error)
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_device_id);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gpu_nms") < 0)) __PYX_ERR(0, 16, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_dets = ((PyArrayObject *)values[0]);
__pyx_v_thresh = ((PyObject*)values[1]);
if (values[2]) {
__pyx_v_device_id = __Pyx_PyInt_As_npy_int32(values[2]); if (unlikely((__pyx_v_device_id == (npy_int32)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L3_error)
} else {
__pyx_v_device_id = ((__pyx_t_5numpy_int32_t)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dets), __pyx_ptype_5numpy_ndarray, 1, "dets", 0))) __PYX_ERR(0, 16, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_thresh), (&PyFloat_Type), 1, "thresh", 1))) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_r = __pyx_pf_3nms_7gpu_nms_gpu_nms(__pyx_self, __pyx_v_dets, __pyx_v_thresh, __pyx_v_device_id);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
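/* The wrapper above only unpacks the Python-level arguments: `dets` must be a
 * 2-D float32 ndarray, `thresh` a Python float, and `device_id` an optional
 * np.int32 defaulting to 0.  The actual NMS logic lives in
 * __pyx_pf_3nms_7gpu_nms_gpu_nms() below. */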
static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id) {
int __pyx_v_boxes_num;
int __pyx_v_boxes_dim;
int __pyx_v_num_out;
PyArrayObject *__pyx_v_keep = 0;
PyArrayObject *__pyx_v_scores = 0;
PyArrayObject *__pyx_v_order = 0;
PyArrayObject *__pyx_v_sorted_dets = 0;
__Pyx_LocalBuf_ND __pyx_pybuffernd_dets;
__Pyx_Buffer __pyx_pybuffer_dets;
__Pyx_LocalBuf_ND __pyx_pybuffernd_keep;
__Pyx_Buffer __pyx_pybuffer_keep;
__Pyx_LocalBuf_ND __pyx_pybuffernd_order;
__Pyx_Buffer __pyx_pybuffer_order;
__Pyx_LocalBuf_ND __pyx_pybuffernd_scores;
__Pyx_Buffer __pyx_pybuffer_scores;
__Pyx_LocalBuf_ND __pyx_pybuffernd_sorted_dets;
__Pyx_Buffer __pyx_pybuffer_sorted_dets;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyArrayObject *__pyx_t_6 = NULL;
PyArrayObject *__pyx_t_7 = NULL;
PyArrayObject *__pyx_t_8 = NULL;
PyArrayObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
float __pyx_t_14;
PyObject *__pyx_t_15 = NULL;
PyObject *__pyx_t_16 = NULL;
PyObject *__pyx_t_17 = NULL;
__Pyx_RefNannySetupContext("gpu_nms", 0);
__pyx_pybuffer_keep.pybuffer.buf = NULL;
__pyx_pybuffer_keep.refcount = 0;
__pyx_pybuffernd_keep.data = NULL;
__pyx_pybuffernd_keep.rcbuffer = &__pyx_pybuffer_keep;
__pyx_pybuffer_scores.pybuffer.buf = NULL;
__pyx_pybuffer_scores.refcount = 0;
__pyx_pybuffernd_scores.data = NULL;
__pyx_pybuffernd_scores.rcbuffer = &__pyx_pybuffer_scores;
__pyx_pybuffer_order.pybuffer.buf = NULL;
__pyx_pybuffer_order.refcount = 0;
__pyx_pybuffernd_order.data = NULL;
__pyx_pybuffernd_order.rcbuffer = &__pyx_pybuffer_order;
__pyx_pybuffer_sorted_dets.pybuffer.buf = NULL;
__pyx_pybuffer_sorted_dets.refcount = 0;
__pyx_pybuffernd_sorted_dets.data = NULL;
__pyx_pybuffernd_sorted_dets.rcbuffer = &__pyx_pybuffer_sorted_dets;
__pyx_pybuffer_dets.pybuffer.buf = NULL;
__pyx_pybuffer_dets.refcount = 0;
__pyx_pybuffernd_dets.data = NULL;
__pyx_pybuffernd_dets.rcbuffer = &__pyx_pybuffer_dets;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dets.rcbuffer->pybuffer, (PyObject*)__pyx_v_dets, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_pybuffernd_dets.diminfo[0].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dets.diminfo[0].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_dets.diminfo[1].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dets.diminfo[1].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[1];
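  /* Cache the acquired buffer's strides and shape in diminfo so that later
   * indexed element accesses can compute addresses directly from these cached
   * values instead of going back through the Py_buffer struct. */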
/* "nms/gpu_nms.pyx":18
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh,
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0] # <<<<<<<<<<<<<<
* cdef int boxes_dim = dets.shape[1]
* cdef int num_out
*/
__pyx_v_boxes_num = (__pyx_v_dets->dimensions[0]);
/* "nms/gpu_nms.pyx":19
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
* cdef int boxes_dim = dets.shape[1] # <<<<<<<<<<<<<<
* cdef int num_out
* cdef np.ndarray[np.int32_t, ndim=1] \
*/
__pyx_v_boxes_dim = (__pyx_v_dets->dimensions[1]);
/* "nms/gpu_nms.pyx":22
* cdef int num_out
* cdef np.ndarray[np.int32_t, ndim=1] \
* keep = np.zeros(boxes_num, dtype=np.int32) # <<<<<<<<<<<<<<
* cdef np.ndarray[np.float32_t, ndim=1] \
* scores = dets[:, 4]
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_boxes_num); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 22, __pyx_L1_error)
__pyx_t_6 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_keep = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 21, __pyx_L1_error)
} else {__pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_6 = 0;
__pyx_v_keep = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "nms/gpu_nms.pyx":24
* keep = np.zeros(boxes_num, dtype=np.int32)
* cdef np.ndarray[np.float32_t, ndim=1] \
* scores = dets[:, 4] # <<<<<<<<<<<<<<
* #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn
* # order = scores.argsort()[::-1]
*/
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 24, __pyx_L1_error)
__pyx_t_7 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scores.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_scores = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_scores.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 23, __pyx_L1_error)
} else {__pyx_pybuffernd_scores.diminfo[0].strides = __pyx_pybuffernd_scores.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scores.diminfo[0].shape = __pyx_pybuffernd_scores.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_7 = 0;
__pyx_v_scores = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "nms/gpu_nms.pyx":28
* # order = scores.argsort()[::-1]
* cdef np.ndarray[np.intp_t, ndim=1] \
* order = scores.argsort()[::-1] # <<<<<<<<<<<<<<
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :]
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_scores), __pyx_n_s_argsort); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
if (__pyx_t_3) {
__pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error)
}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyObject_GetItem(__pyx_t_5, __pyx_slice__3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 28, __pyx_L1_error)
__pyx_t_8 = ((PyArrayObject *)__pyx_t_1);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_order.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_order = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_order.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 27, __pyx_L1_error)
} else {__pyx_pybuffernd_order.diminfo[0].strides = __pyx_pybuffernd_order.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_order.diminfo[0].shape = __pyx_pybuffernd_order.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_8 = 0;
__pyx_v_order = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "nms/gpu_nms.pyx":30
* order = scores.argsort()[::-1]
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :] # <<<<<<<<<<<<<<
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
* keep = keep[:num_out]
*/
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)__pyx_v_order));
__Pyx_GIVEREF(((PyObject *)__pyx_v_order));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_order));
__Pyx_INCREF(__pyx_slice__4);
__Pyx_GIVEREF(__pyx_slice__4);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_slice__4);
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 30, __pyx_L1_error)
__pyx_t_9 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
__pyx_v_sorted_dets = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 29, __pyx_L1_error)
} else {__pyx_pybuffernd_sorted_dets.diminfo[0].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sorted_dets.diminfo[0].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sorted_dets.diminfo[1].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sorted_dets.diminfo[1].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[1];
}
}
__pyx_t_9 = 0;
__pyx_v_sorted_dets = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "nms/gpu_nms.pyx":31
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :]
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) # <<<<<<<<<<<<<<
* keep = keep[:num_out]
* return list(order[keep])
*/
__pyx_t_10 = 0;
__pyx_t_11 = -1;
if (__pyx_t_10 < 0) {
__pyx_t_10 += __pyx_pybuffernd_keep.diminfo[0].shape;
if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 0;
} else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_keep.diminfo[0].shape)) __pyx_t_11 = 0;
if (unlikely(__pyx_t_11 != -1)) {
__Pyx_RaiseBufferIndexError(__pyx_t_11);
__PYX_ERR(0, 31, __pyx_L1_error)
}
__pyx_t_12 = 0;
__pyx_t_13 = 0;
__pyx_t_11 = -1;
if (__pyx_t_12 < 0) {
__pyx_t_12 += __pyx_pybuffernd_sorted_dets.diminfo[0].shape;
if (unlikely(__pyx_t_12 < 0)) __pyx_t_11 = 0;
} else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_sorted_dets.diminfo[0].shape)) __pyx_t_11 = 0;
if (__pyx_t_13 < 0) {
__pyx_t_13 += __pyx_pybuffernd_sorted_dets.diminfo[1].shape;
if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1;
} else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_sorted_dets.diminfo[1].shape)) __pyx_t_11 = 1;
if (unlikely(__pyx_t_11 != -1)) {
__Pyx_RaiseBufferIndexError(__pyx_t_11);
__PYX_ERR(0, 31, __pyx_L1_error)
}
__pyx_t_14 = __pyx_PyFloat_AsFloat(__pyx_v_thresh); if (unlikely((__pyx_t_14 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 31, __pyx_L1_error)
_nms((&(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int32_t *, __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_keep.diminfo[0].strides))), (&__pyx_v_num_out), (&(*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_sorted_dets.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_sorted_dets.diminfo[1].strides))), __pyx_v_boxes_num, __pyx_v_boxes_dim, __pyx_t_14, __pyx_v_device_id);
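  /* Single call into the CUDA side: _nms() (declared via the `cdef extern`
   * block quoted at .pyx line 16) receives a pointer into the int32 `keep`
   * output buffer, a pointer to `num_out`, the float32 data of `sorted_dets`,
   * the box count and dimension, the suppression threshold and the device id.
   * On return, keep[0..num_out-1] holds indices into sorted_dets. */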
/* "nms/gpu_nms.pyx":32
* sorted_dets = dets[order, :]
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
* keep = keep[:num_out] # <<<<<<<<<<<<<<
* return list(order[keep])
*/
__pyx_t_5 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_v_keep), 0, __pyx_v_num_out, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 32, __pyx_L1_error)
__pyx_t_6 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer);
__pyx_t_11 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_11 < 0)) {
PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_v_keep, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17);
}
}
__pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 32, __pyx_L1_error)
}
__pyx_t_6 = 0;
__Pyx_DECREF_SET(__pyx_v_keep, ((PyArrayObject *)__pyx_t_5));
__pyx_t_5 = 0;
/* "nms/gpu_nms.pyx":33
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
* keep = keep[:num_out]
* return list(order[keep]) # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_order), ((PyObject *)__pyx_v_keep)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PySequence_List(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "nms/gpu_nms.pyx":16
* void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
*
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<<
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_keep);
__Pyx_XDECREF((PyObject *)__pyx_v_scores);
__Pyx_XDECREF((PyObject *)__pyx_v_order);
__Pyx_XDECREF((PyObject *)__pyx_v_sorted_dets);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
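/* For reference, the .pyx lines quoted throughout the function above amount to
 * the following (sketch only, reconstructed from those comments, not part of
 * the generated output):
 *
 *     keep        = np.zeros(boxes_num, dtype=np.int32)
 *     scores      = dets[:, 4]
 *     order       = scores.argsort()[::-1]
 *     sorted_dets = dets[order, :]
 *     _nms(&keep[0], &num_out, &sorted_dets[0, 0],
 *          boxes_num, boxes_dim, thresh, device_id)
 *     keep        = keep[:num_out]
 *     return list(order[keep])
 */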
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fulfill the PEP.
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":203
* # of flags
*
* if info == NULL: return # <<<<<<<<<<<<<<
*
* cdef int copy_shape, i, ndim
*/
__pyx_t_1 = ((__pyx_v_info == NULL) != 0);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":206
*
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":207
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":209
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":212
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* copy_shape = 1 # <<<<<<<<<<<<<<
* else:
* copy_shape = 0
*/
__pyx_v_copy_shape = 1;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
goto __pyx_L4;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":214
* copy_shape = 1
* else:
* copy_shape = 0 # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
/*else*/ {
__pyx_v_copy_shape = 0;
}
__pyx_L4:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L6_bool_binop_done;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":217
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L6_bool_binop_done:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 218, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L9_bool_binop_done;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":221
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L9_bool_binop_done:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 222, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":224
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if copy_shape:
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":225
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if copy_shape:
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
__pyx_t_1 = (__pyx_v_copy_shape != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":229
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":230
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":231
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_4 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":232
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":233
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
goto __pyx_L11;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":235
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
/*else*/ {
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":236
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L11:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":237
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":238
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":239
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":242
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef int offset
*/
__pyx_v_f = NULL;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":243
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef int offset
*
*/
__pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":246
* cdef int offset
*
* cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
*
* if not hasfields and not copy_shape:
*/
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
__pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L15_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L15_bool_binop_done:;
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":250
* if not hasfields and not copy_shape:
* # do not call releasebuffer
* info.obj = None # <<<<<<<<<<<<<<
* else:
* # need to call releasebuffer
*/
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
goto __pyx_L14;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":253
* else:
* # need to call releasebuffer
* info.obj = self # <<<<<<<<<<<<<<
*
* if not hasfields:
*/
/*else*/ {
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
}
__pyx_L14:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
__pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":256
*
* if not hasfields:
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
*/
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L20_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_L20_next_or:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":258
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 259, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":260
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = ((char *)"b");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":261
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
case NPY_UBYTE:
__pyx_v_f = ((char *)"B");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":262
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
case NPY_SHORT:
__pyx_v_f = ((char *)"h");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":263
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
case NPY_USHORT:
__pyx_v_f = ((char *)"H");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":264
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
case NPY_INT:
__pyx_v_f = ((char *)"i");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":265
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
case NPY_UINT:
__pyx_v_f = ((char *)"I");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":266
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
case NPY_LONG:
__pyx_v_f = ((char *)"l");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":267
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
case NPY_ULONG:
__pyx_v_f = ((char *)"L");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":268
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
case NPY_LONGLONG:
__pyx_v_f = ((char *)"q");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":269
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
case NPY_ULONGLONG:
__pyx_v_f = ((char *)"Q");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":270
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
case NPY_FLOAT:
__pyx_v_f = ((char *)"f");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":271
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
case NPY_DOUBLE:
__pyx_v_f = ((char *)"d");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":272
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
case NPY_LONGDOUBLE:
__pyx_v_f = ((char *)"g");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":273
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
case NPY_CFLOAT:
__pyx_v_f = ((char *)"Zf");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":274
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
case NPY_CDOUBLE:
__pyx_v_f = ((char *)"Zd");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":275
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
case NPY_CLONGDOUBLE:
__pyx_v_f = ((char *)"Zg");
break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":276
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
case NPY_OBJECT:
__pyx_v_f = ((char *)"O");
break;
default:
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":278
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 278, __pyx_L1_error)
break;
}
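  /* The switch above maps the NumPy type number to the corresponding
   * struct-style format character ("b", "B", ..., "Zg", "O") used in the
   * exported buffer's format string. */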
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":279
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":280
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":282
* return
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
*/
/*else*/ {
__pyx_v_info->format = ((char *)malloc(0xFF));
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":283
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":284
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":285
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
* f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
* info.format + _buffer_format_string_len,
* &offset)
*/
__pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error)
__pyx_v_f = __pyx_t_7;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":288
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = '\x00';
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fulfill the PEP.
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
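/* __getbuffer__ above implements the buffer protocol for ndarray as used by
 * the typed buffer arguments in gpu_nms(): it validates the requested
 * contiguity flags, fills in buf/shape/strides/itemsize, and exports a format
 * string for the element dtype (raising ValueError for non-native byte
 * order or unknown type numbers). */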
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* Python wrapper */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":292
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format) # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides)
*/
free(__pyx_v_info->format);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":294
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides) # <<<<<<<<<<<<<<
* # info.shape was stored after info.strides in the same block
*
*/
free(__pyx_v_info->strides);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
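/* __releasebuffer__ frees the format string allocated for structured dtypes
 * and, when npy_intp and Py_ssize_t differ in size, the strides/shape block
 * that __getbuffer__ malloc'ed above. */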
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":771
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":774
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":777
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":780
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":783
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
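/* Helper summary: _util_dtypestring() walks descr.names/descr.fields recursively,
   checking remaining space in the caller's format buffer, rejecting non-native
   byte orders, emitting 'x' pad bytes until the running offset matches each field,
   and then writing the field's format character (or recursing for nested records).
   It returns the advanced write position in the buffer. */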
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":790
*
* cdef dtype child
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":791
* cdef dtype child
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(1, 794, __pyx_L1_error)
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":795
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 795, __pyx_L1_error)
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":796
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 796, __pyx_L1_error)
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error)
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (__pyx_t_6) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 799, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":802
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (__pyx_t_6) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 803, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":813
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":814
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 0x78;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":815
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":816
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":818
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":821
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (__pyx_t_6) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 823, __pyx_L1_error)
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
}
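      /* The if/elif chain below maps NumPy type numbers to single-character
         buffer-format codes (written as integer literals rather than character
         constants, per the ticket #99 note above); complex types emit a
         two-character "Z?" code and advance the write pointer one extra byte. */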
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":826
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":827
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":828
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x68;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":829
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":830
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x69;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":831
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":832
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x6C;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":833
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":834
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x71;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":835
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":836
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x66;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":837
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x64;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":838
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x67;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":839
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x66;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":840
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x64;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":841
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x67;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":842
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":844
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
/*else*/ {
__pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 844, __pyx_L1_error)
}
__pyx_L15:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":845
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
goto __pyx_L13;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":849
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
/*else*/ {
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":850
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
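/* set_array_base()/get_array_base() manage the object that owns an ndarray's data:
   set_array_base() takes a new reference to `base` (or NULL for None) before
   releasing the old arr.base, and get_array_base() returns the stored base object,
   or None when it is unset. */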
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
PyObject *__pyx_v_baseptr;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
__pyx_t_1 = (__pyx_v_base == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":969
* cdef PyObject* baseptr
* if base is None:
* baseptr = NULL # <<<<<<<<<<<<<<
* else:
* Py_INCREF(base) # important to do this before decref below!
*/
__pyx_v_baseptr = NULL;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
goto __pyx_L3;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":971
* baseptr = NULL
* else:
* Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<<
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
*/
/*else*/ {
Py_INCREF(__pyx_v_base);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":972
* else:
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base # <<<<<<<<<<<<<<
* Py_XDECREF(arr.base)
* arr.base = baseptr
*/
__pyx_v_baseptr = ((PyObject *)__pyx_v_base);
}
__pyx_L3:;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":973
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base) # <<<<<<<<<<<<<<
* arr.base = baseptr
*
*/
Py_XDECREF(__pyx_v_arr->base);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":974
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
* arr.base = baseptr # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
__pyx_v_arr->base = __pyx_v_baseptr;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
__pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
if (__pyx_t_1) {
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":978
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL:
* return None # <<<<<<<<<<<<<<
* else:
* return <object>arr.base
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":980
* return None
* else:
* return <object>arr.base # <<<<<<<<<<<<<<
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
__pyx_r = ((PyObject *)__pyx_v_arr->base);
goto __pyx_L0;
}
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
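/* The static method table is empty (sentinel only): the gpu_nms function object is
   created with PyCFunction_NewEx during module initialization below and stored in
   the module dictionary. */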
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
#if PY_VERSION_HEX < 0x03020000
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
#else
PyModuleDef_HEAD_INIT,
#endif
"gpu_nms",
0, /* m_doc */
-1, /* m_size */
__pyx_methods /* m_methods */,
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
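/* On Python 3 the module is created from the PyModuleDef above; on Python 2 the
   init function below uses Py_InitModule4 instead. */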
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_k_D_v_zix_caffe_caffe_win_20160523, sizeof(__pyx_k_D_v_zix_caffe_caffe_win_20160523), 0, 0, 1, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
{&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
{&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1},
{&__pyx_n_s_boxes_dim, __pyx_k_boxes_dim, sizeof(__pyx_k_boxes_dim), 0, 0, 1, 1},
{&__pyx_n_s_boxes_num, __pyx_k_boxes_num, sizeof(__pyx_k_boxes_num), 0, 0, 1, 1},
{&__pyx_n_s_dets, __pyx_k_dets, sizeof(__pyx_k_dets), 0, 0, 1, 1},
{&__pyx_n_s_device_id, __pyx_k_device_id, sizeof(__pyx_k_device_id), 0, 0, 1, 1},
{&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
{&__pyx_n_s_gpu_nms, __pyx_k_gpu_nms, sizeof(__pyx_k_gpu_nms), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1},
{&__pyx_n_s_keep, __pyx_k_keep, sizeof(__pyx_k_keep), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
{&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
{&__pyx_n_s_nms_gpu_nms, __pyx_k_nms_gpu_nms, sizeof(__pyx_k_nms_gpu_nms), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_num_out, __pyx_k_num_out, sizeof(__pyx_k_num_out), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_scores, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1},
{&__pyx_n_s_sorted_dets, __pyx_k_sorted_dets, sizeof(__pyx_k_sorted_dets), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_thresh, __pyx_k_thresh, sizeof(__pyx_k_thresh), 0, 0, 1, 1},
{&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
{&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
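/* String constant table: every interned name, attribute, keyword and error message
   used by the module (including the original source path) is registered in one pass
   by __Pyx_InitStrings() during __Pyx_InitGlobals(). */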
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 231, __pyx_L1_error)
__pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
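/* __Pyx_InitCachedConstants() pre-builds the constant objects used at runtime: the
   (slice(None), 4) index for dets[:, 4], the [::-1] and [:] slices for the argsort
   reversal and dets[order, :], the single-argument tuples for the buffer-protocol
   error messages, and the argument-name tuple plus code object for gpu_nms itself. */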
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "nms/gpu_nms.pyx":24
* keep = np.zeros(boxes_num, dtype=np.int32)
* cdef np.ndarray[np.float32_t, ndim=1] \
* scores = dets[:, 4] # <<<<<<<<<<<<<<
* #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn
* # order = scores.argsort()[::-1]
*/
__pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice_);
__Pyx_GIVEREF(__pyx_slice_);
__pyx_tuple__2 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_4); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "nms/gpu_nms.pyx":28
* # order = scores.argsort()[::-1]
* cdef np.ndarray[np.intp_t, ndim=1] \
* order = scores.argsort()[::-1] # <<<<<<<<<<<<<<
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :]
*/
__pyx_slice__3 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
/* "nms/gpu_nms.pyx":30
* order = scores.argsort()[::-1]
* cdef np.ndarray[np.float32_t, ndim=2] \
* sorted_dets = dets[order, :] # <<<<<<<<<<<<<<
* _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
* keep = keep[:num_out]
*/
__pyx_slice__4 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__4)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__4);
__Pyx_GIVEREF(__pyx_slice__4);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 218, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 799, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 803, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 823, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "nms/gpu_nms.pyx":16
* void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
*
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<<
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
*/
__pyx_tuple__11 = PyTuple_Pack(10, __pyx_n_s_dets, __pyx_n_s_thresh, __pyx_n_s_device_id, __pyx_n_s_boxes_num, __pyx_n_s_boxes_dim, __pyx_n_s_num_out, __pyx_n_s_keep, __pyx_n_s_scores, __pyx_n_s_order, __pyx_n_s_sorted_dets); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
__pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(3, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_n_s_gpu_nms, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
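/* __Pyx_InitGlobals() interns the string table and creates the small integer
   constants 4 and -1 needed by the cached index and slice objects above. */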
static int __Pyx_InitGlobals(void) {
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
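/* Module entry point: initgpu_nms (Python 2) / PyInit_gpu_nms (Python 3) creates
   the module, imports the numpy extension types, and executes the module-level code
   of gpu_nms.pyx (importing numpy, checking sizeof(int) == sizeof(np.int32_t), and
   binding the gpu_nms function).  Illustrative use from Python, assuming the usual
   detection layout with the score in column 4 (see the dets[:, 4] constant above):

       from nms.gpu_nms import gpu_nms
       keep = gpu_nms(dets.astype(np.float32), 0.3, device_id=0)
*/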
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initgpu_nms(void); /*proto*/
PyMODINIT_FUNC initgpu_nms(void)
#else
PyMODINIT_FUNC PyInit_gpu_nms(void); /*proto*/
PyMODINIT_FUNC PyInit_gpu_nms(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_gpu_nms(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("gpu_nms", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_nms__gpu_nms) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "nms.gpu_nms")) {
if (unlikely(PyDict_SetItemString(modules, "nms.gpu_nms", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global init code ---*/
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/*--- Type import code ---*/
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type",
#if CYTHON_COMPILING_IN_PYPY
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error)
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error)
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "nms/gpu_nms.pyx":8
* # --------------------------------------------------------
*
* import numpy as np # <<<<<<<<<<<<<<
* cimport numpy as np
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "nms/gpu_nms.pyx":11
* cimport numpy as np
*
* assert sizeof(int) == sizeof(np.int32_t) # <<<<<<<<<<<<<<
*
* cdef extern from "gpu_nms.hpp":
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!(((sizeof(int)) == (sizeof(__pyx_t_5numpy_int32_t))) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 11, __pyx_L1_error)
}
}
#endif
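  /* Compiled form of `assert sizeof(int) == sizeof(np.int32_t)` from gpu_nms.pyx:11;
     the check is skipped when assertions are compiled out or Python runs in
     optimized (-O) mode. */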
/* "nms/gpu_nms.pyx":16
* void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
*
* def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<<
* np.int32_t device_id=0):
* cdef int boxes_num = dets.shape[0]
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3nms_7gpu_nms_1gpu_nms, NULL, __pyx_n_s_nms_gpu_nms); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_gpu_nms, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "nms/gpu_nms.pyx":1
* # -------------------------------------------------------- # <<<<<<<<<<<<<<
* # Faster R-CNN
* # Copyright (c) 2015 Microsoft
*/
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init nms.gpu_nms");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* --- Runtime support code --- */
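/* Everything from here on is generic helper code that Cython emits into every
   generated module (refcount debugging, argument and keyword parsing, error
   formatting, type checks); none of it is specific to gpu_nms. */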
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
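/* Matches the keyword arguments passed by the caller against the declared
   argument names (dets, thresh, device_id for gpu_nms). An interned-string
   pointer comparison is tried first, then a full string compare; unknown or
   duplicated keywords raise TypeError. */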
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* ArgTypeTest */
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
}
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (none_allowed && obj == Py_None) return 1;
else if (exact) {
if (likely(Py_TYPE(obj) == type)) return 1;
#if PY_MAJOR_VERSION == 2
else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(PyObject_TypeCheck(obj, type))) return 1;
}
__Pyx_RaiseArgumentTypeInvalid(name, obj, type);
return 0;
}
/* BufferFormatCheck */
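/* Buffer format-string checking. These helpers parse the struct-module /
   PEP 3118 format string of an acquired buffer and verify, element by
   element, that it matches the dtype the generated code expects (here
   float32 and int32 ndarrays), taking packing mode and alignment into
   account. A mismatch raises ValueError with a descriptive message. */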
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
unsigned int n = 1;
return *(unsigned char*)(&n) != 0;
}
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
      PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue;
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
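/* Acquires a buffer from `obj` and validates it against the expected dtype:
   number of dimensions, format string (unless `cast` is set) and item size
   must all match, otherwise -1 is returned with an exception set. */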
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
if (obj == Py_None || obj == NULL) {
__Pyx_ZeroBuffer(buf);
return 0;
}
buf->buf = NULL;
if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
if (buf->ndim != nd) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned)buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_ZeroBuffer(buf);
return -1;
}
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (info->buf == NULL) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* GetModuleGlobalName */
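/* Looks a name up in the module dict first and falls back to builtins;
   this is how module-level names such as `np` are resolved at runtime. */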
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON
result = PyDict_GetItem(__pyx_d, name);
if (likely(result)) {
Py_INCREF(result);
} else {
#else
result = PyObject_GetItem(__pyx_d, name);
if (!result) {
PyErr_Clear();
#endif
result = __Pyx_GetBuiltinName(name);
}
return result;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(PyObject_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
if (likely(PyCFunction_Check(func))) {
#endif
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
if (likely(PyCFunction_Check(func))) {
#endif
if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
return __Pyx_PyObject_CallMethO(func, NULL);
}
}
return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* BufferIndexError */
static void __Pyx_RaiseBufferIndexError(int axis) {
PyErr_Format(PyExc_IndexError,
"Out of bounds on buffer access (axis %d)", axis);
}
/* SliceObject */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
#if CYTHON_COMPILING_IN_CPYTHON
PyMappingMethods* mp;
#if PY_MAJOR_VERSION < 3
PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
if (likely(ms && ms->sq_slice)) {
if (!has_cstart) {
if (_py_start && (*_py_start != Py_None)) {
cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
} else
cstart = 0;
}
if (!has_cstop) {
if (_py_stop && (*_py_stop != Py_None)) {
cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
} else
cstop = PY_SSIZE_T_MAX;
}
if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
Py_ssize_t l = ms->sq_length(obj);
if (likely(l >= 0)) {
if (cstop < 0) {
cstop += l;
if (cstop < 0) cstop = 0;
}
if (cstart < 0) {
cstart += l;
if (cstart < 0) cstart = 0;
}
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
goto bad;
PyErr_Clear();
}
}
return ms->sq_slice(obj, cstart, cstop);
}
#endif
mp = Py_TYPE(obj)->tp_as_mapping;
if (likely(mp && mp->mp_subscript))
#endif
{
PyObject* result;
PyObject *py_slice, *py_start, *py_stop;
if (_py_slice) {
py_slice = *_py_slice;
} else {
PyObject* owned_start = NULL;
PyObject* owned_stop = NULL;
if (_py_start) {
py_start = *_py_start;
} else {
if (has_cstart) {
owned_start = py_start = PyInt_FromSsize_t(cstart);
if (unlikely(!py_start)) goto bad;
} else
py_start = Py_None;
}
if (_py_stop) {
py_stop = *_py_stop;
} else {
if (has_cstop) {
owned_stop = py_stop = PyInt_FromSsize_t(cstop);
if (unlikely(!py_stop)) {
Py_XDECREF(owned_start);
goto bad;
}
} else
py_stop = Py_None;
}
py_slice = PySlice_New(py_start, py_stop, Py_None);
Py_XDECREF(owned_start);
Py_XDECREF(owned_stop);
if (unlikely(!py_slice)) goto bad;
}
#if CYTHON_COMPILING_IN_CPYTHON
result = mp->mp_subscript(obj, py_slice);
#else
result = PyObject_GetItem(obj, py_slice);
#endif
if (!_py_slice) {
Py_DECREF(py_slice);
}
return result;
}
PyErr_Format(PyExc_TypeError,
"'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
bad:
return NULL;
}
/* BufferFallbackError */
static void __Pyx_RaiseBufferFallbackError(void) {
PyErr_SetString(PyExc_ValueError,
"Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
}
/* PyErrFetchRestore */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
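/* Implements the semantics of the `raise` statement, with separate code
   paths for Python 2 and Python 3 (exception chaining via __cause__ is
   only handled in the Python 3 branch). */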
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
#if PY_VERSION_HEX >= 0x03030000
if (cause) {
#else
if (cause && cause != Py_None) {
#endif
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = PyThreadState_GET();
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* Import */
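/* Thin wrapper around __import__ / PyImport_ImportModuleLevelObject, used
   here for `import numpy as np`. On Python 3 a level of -1 means: try a
   relative import first, then fall back to an absolute one. */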
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_VERSION_HEX < 0x03030000
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(1);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
#endif
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_VERSION_HEX < 0x03030000
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* CodeObjectCache */
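/* Small cache of synthesized PyCodeObjects keyed by source line, kept sorted
   so entries can be found by binary search; it avoids re-creating a code
   object every time a traceback entry is added for the same line. */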
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
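/* Creates a throw-away code object and frame that point at the original
   .pyx source line, so that errors raised from this C code show up with a
   meaningful entry in the Python traceback. */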
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
py_code = __pyx_find_code_object(c_line ? c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? c_line : py_line, py_code);
}
py_frame = PyFrame_New(
PyThreadState_GET(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
py_frame->f_lineno = py_line;
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; }
Py_DECREF(obj);
view->obj = NULL;
}
#endif
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntToPy */
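/* C int -> Python int conversion, choosing between PyInt and PyLong
   construction based on the size and signedness of the C type. */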
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* None */
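/* Complex-number helpers. When the compiler provides C99 _Complex or C++
   std::complex (CYTHON_CCOMPLEX), these map onto the native type; otherwise
   Cython falls back to the hand-written struct arithmetic below. */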
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* None */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
z = __Pyx_c_prodf(a, a);
return __Pyx_c_prodf(a, a);
case 3:
z = __Pyx_c_prodf(a, a);
return __Pyx_c_prodf(z, a);
case 4:
z = __Pyx_c_prodf(a, a);
return __Pyx_c_prodf(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
}
r = a.real;
theta = 0;
} else {
r = __Pyx_c_absf(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* None */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* None */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
z = __Pyx_c_prod(a, a);
return __Pyx_c_prod(a, a);
case 3:
z = __Pyx_c_prod(a, a);
return __Pyx_c_prod(z, a);
case 4:
z = __Pyx_c_prod(a, a);
return __Pyx_c_prod(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
}
r = a.real;
theta = 0;
} else {
r = __Pyx_c_abs(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(enum NPY_TYPES) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
}
} else {
if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
little, !is_unsigned);
}
}
/* CIntFromPy */
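/* Python int -> npy_int32 conversion with overflow checking, presumably used
   for the device_id argument declared as np.int32_t. The
   CYTHON_USE_PYLONG_INTERNALS fast paths read the PyLong digits directly;
   the generic path goes through _PyLong_AsByteArray. */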
static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) {
const npy_int32 neg_one = (npy_int32) -1, const_zero = (npy_int32) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(npy_int32) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (npy_int32) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (npy_int32) 0;
case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, digits[0])
case 2:
if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) >= 2 * PyLong_SHIFT) {
return (npy_int32) (((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) >= 3 * PyLong_SHIFT) {
return (npy_int32) (((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) >= 4 * PyLong_SHIFT) {
return (npy_int32) (((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (npy_int32) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(npy_int32) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (npy_int32) 0;
case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, +digits[0])
case -2:
if (8 * sizeof(npy_int32) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) {
return (npy_int32) (((npy_int32)-1)*(((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) {
return (npy_int32) ((((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) {
return (npy_int32) (((npy_int32)-1)*(((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) {
return (npy_int32) ((((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) {
return (npy_int32) (((npy_int32)-1)*(((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) {
return (npy_int32) ((((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])));
}
}
break;
}
#endif
if (sizeof(npy_int32) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(npy_int32, long, PyLong_AsLong(x))
} else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
npy_int32 val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (npy_int32) -1;
}
} else {
npy_int32 val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (npy_int32) -1;
val = __Pyx_PyInt_As_npy_int32(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to npy_int32");
return (npy_int32) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to npy_int32");
return (npy_int32) -1;
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
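/* Editor's note (illustrative, not emitted by Cython): the Py_SIZE(x) switch above
 * is a fast path that reassembles small PyLongs directly from their internal
 * base-2^PyLong_SHIFT digits. Assuming CPython's usual 30-bit digits, a positive
 * value v that needs two digits is stored as
 *     digits[0] = v & ((1 << PyLong_SHIFT) - 1),  digits[1] = v >> PyLong_SHIFT,
 * and the "case 2:" branch recovers it as ((digits[1] << PyLong_SHIFT) | digits[0]),
 * falling back to the PyLong_As* conversion calls only for wider values. */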
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
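/* Editor's note (illustrative): the byte-array fallback above uses a common
 * endianness probe: with `int one = 1;`, the expression (int)*(unsigned char *)&one
 * reads the lowest-addressed byte and therefore yields 1 on little-endian hosts and
 * 0 on big-endian ones; that flag tells _PyLong_FromByteArray how to interpret the
 * raw bytes of `value` in the host's native byte order. */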
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* ModuleImport */
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = __Pyx_PyIdentifier_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
size_t size, int strict)
{
PyObject *py_module = 0;
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
py_name = __Pyx_PyIdentifier_FromString(class_name);
if (!py_name)
goto bad;
result = PyObject_GetAttr(py_module, py_name);
Py_DECREF(py_name);
py_name = 0;
Py_DECREF(py_module);
py_module = 0;
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if (!strict && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd",
module_name, class_name, basicsize, size);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
else if ((size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd",
module_name, class_name, basicsize, size);
goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(py_module);
Py_XDECREF(result);
return NULL;
}
#endif
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
#else
if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (PyUnicode_IS_ASCII(o)) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
PyNumberMethods *m;
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (PyInt_Check(x) || PyLong_Check(x))
#else
if (PyLong_Check(x))
#endif
return __Pyx_NewRef(x);
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = PyNumber_Int(x);
}
else if (m && m->nb_long) {
name = "long";
res = PyNumber_Long(x);
}
#else
if (m && m->nb_int) {
name = "int";
res = PyNumber_Long(x);
}
#endif
if (res) {
#if PY_MAJOR_VERSION < 3
if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
if (!PyLong_Check(res)) {
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
name, name, Py_TYPE(res)->tp_name);
Py_DECREF(res);
return NULL;
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
      return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */ |
88517f4293649ab9899a188de97712d022cf4cda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
/*! \internal \file
*
* \brief Implements Leap-Frog using CUDA
*
* This file contains implementation of basic Leap-Frog integrator
* using CUDA, including class initialization, data-structures management
* and GPU kernel.
*
* \todo Reconsider naming towards using "gpu" suffix instead of "cuda".
*
* \author Artem Zhmurov <[email protected]>
*
* \ingroup module_mdlib
*/
#include "gmxpre.h"
#include "leapfrog_cuda_impl.h"
#include <assert.h>
#include <stdio.h>
#include <cmath>
#include <algorithm>
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.cuh"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/leapfrog_cuda.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pbcutil/pbc_aiuc_cuda.cuh"
namespace gmx
{
//! Number of CUDA threads in a block
constexpr static int c_threadsPerBlock = 256;
//! Maximum number of threads in a block (for __launch_bounds__)
constexpr static int c_maxThreadsPerBlock = c_threadsPerBlock;
/*! \brief Main kernel for Leap-Frog integrator.
*
* Each GPU thread works with a single particle. Empty declaration is needed to
* avoid "no previous prototype for function" clang warning.
*
* \todo Check if the force should be set to zero here.
* \todo This kernel can also accumulate incidental temperatures for each atom.
*
* \param[in] numAtoms Total number of atoms.
* \param[in] gm_x Coordinates before the timestep
* \param[out] gm_xp Coordinates after the timestep.
* \param[in,out] gm_v Velocities to update.
* \param[in] gm_f Atomic forces.
* \param[in] gm_inverseMasses Reciprocal masses.
* \param[in] dt Timestep.
*/
__launch_bounds__(c_maxThreadsPerBlock)
__global__ void leapfrog_kernel(const int numAtoms,
const float3* __restrict__ gm_x,
float3* __restrict__ gm_xp,
float3* __restrict__ gm_v,
const float3* __restrict__ gm_f,
const float* __restrict__ gm_inverseMasses,
const float dt);
__launch_bounds__(c_maxThreadsPerBlock)
__global__ void leapfrog_kernel(const int numAtoms,
const float3* __restrict__ gm_x,
float3* __restrict__ gm_xp,
float3* __restrict__ gm_v,
const float3* __restrict__ gm_f,
const float* __restrict__ gm_inverseMasses,
const float dt)
{
int threadIndex = blockIdx.x*blockDim.x + threadIdx.x;
if (threadIndex < numAtoms)
{
float3 xi = gm_x[threadIndex];
float3 vi = gm_v[threadIndex];
float3 fi = gm_f[threadIndex];
float imi = gm_inverseMasses[threadIndex];
float imidt = imi*dt;
vi += fi*imidt;
xi += vi*dt;
gm_v[threadIndex] = vi;
gm_xp[threadIndex] = xi;
}
return;
}
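/* Editor's note (illustrative, not part of the original GROMACS sources): per thread
 * the kernel above performs the standard leap-frog half-step update
 *
 *     v(t + dt/2) = v(t - dt/2) + (f(t)/m) * dt
 *     x(t + dt)   = x(t)        + v(t + dt/2) * dt
 *
 * with 1/m read from gm_inverseMasses, so each atom costs two fused multiply-adds
 * on float3 values plus the global loads and stores. */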
/*! \brief Integrate
*
* Integrates the equation of motion using Leap-Frog algorithm.
* Updates d_xp_ and d_v_ fields of this object.
*
* \param[in] dt Timestep
*/
void LeapFrogCuda::Impl::integrate(const real dt)
{
ensureNoPendingCudaError("In CUDA version of Leap-Frog integrator");
KernelLaunchConfig config;
config.blockSize[0] = c_threadsPerBlock;
config.blockSize[1] = 1;
config.blockSize[2] = 1;
config.gridSize[0] = (numAtoms_ + c_threadsPerBlock - 1)/c_threadsPerBlock;
config.sharedMemorySize = 0;
config.stream = stream_;
auto kernelPtr = leapfrog_kernel;
const float3 *d_x = d_x_;
float3 *d_xp = d_xp_;
float3 *d_v = d_v_;
const float3 *d_f = d_f_;
const float *d_inverseMasses = d_inverseMasses_;
const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config,
&numAtoms_,
&d_x, &d_xp,
&d_v,
&d_f,
&d_inverseMasses, &dt);
launchGpuKernel(kernelPtr, config, nullptr, "leapfrog_kernel", kernelArgs);
return;
}
/*! \brief Create Leap-Frog object
*
* \param[in] numAtoms Number of atoms.
*/
LeapFrogCuda::Impl::Impl(int numAtoms)
: numAtoms_(numAtoms)
{
allocateDeviceBuffer(&d_x_, numAtoms, nullptr);
allocateDeviceBuffer(&d_xp_, numAtoms, nullptr);
allocateDeviceBuffer(&d_v_, numAtoms, nullptr);
allocateDeviceBuffer(&d_f_, numAtoms, nullptr);
allocateDeviceBuffer(&d_inverseMasses_, numAtoms, nullptr);
    // TODO When the code is integrated into the schedule, it will be assigned a non-default stream.
stream_ = nullptr;
}
LeapFrogCuda::Impl::~Impl()
{
freeDeviceBuffer(&d_x_);
freeDeviceBuffer(&d_xp_);
freeDeviceBuffer(&d_v_);
freeDeviceBuffer(&d_f_);
freeDeviceBuffer(&d_inverseMasses_);
}
/*! \brief
* Update PBC data.
*
* Converts pbc data from t_pbc into the PbcAiuc format and stores the latter.
*
* \param[in] pbc The PBC data in t_pbc format.
*/
void LeapFrogCuda::Impl::setPbc(const t_pbc *pbc)
{
setPbcAiuc(pbc->ndim_ePBC, pbc->box, &pbcAiuc_);
}
/*! \brief Set the integrator
*
* Copies inverse masses from CPU to GPU.
*
* \param[in] md MD atoms, from which inverse masses are taken.
*/
void LeapFrogCuda::Impl::set(const t_mdatoms &md)
{
copyToDeviceBuffer(&d_inverseMasses_, (float*)md.invmass,
0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy coordinates from CPU to GPU.
*
* The data are assumed to be in float3/fvec format (single precision).
*
* \param[in] h_x CPU pointer where coordinates should be copied from.
*/
void LeapFrogCuda::Impl::copyCoordinatesToGpu(const rvec *h_x)
{
copyToDeviceBuffer(&d_x_, (float3*)h_x, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy velocities from CPU to GPU.
*
* The data are assumed to be in float3/fvec format (single precision).
*
* \param[in] h_v CPU pointer where velocities should be copied from.
*/
void LeapFrogCuda::Impl::copyVelocitiesToGpu(const rvec *h_v)
{
copyToDeviceBuffer(&d_v_, (float3*)h_v, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy forces from CPU to GPU.
*
* The data are assumed to be in float3/fvec format (single precision).
*
* \param[in] h_f CPU pointer where forces should be copied from.
*/
void LeapFrogCuda::Impl::copyForcesToGpu(const rvec *h_f)
{
copyToDeviceBuffer(&d_f_, (float3*)h_f, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy coordinates from GPU to CPU.
*
* The data are assumed to be in float3/fvec format (single precision).
*
* \param[out] h_xp CPU pointer where coordinates should be copied to.
*/
void LeapFrogCuda::Impl::copyCoordinatesFromGpu(rvec *h_xp)
{
copyFromDeviceBuffer((float3*)h_xp, &d_xp_, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy velocities from GPU to CPU.
*
* The velocities are assumed to be in float3/fvec format (single precision).
*
 * \param[out] h_v      CPU pointer where velocities should be copied to.
*/
void LeapFrogCuda::Impl::copyVelocitiesFromGpu(rvec *h_v)
{
copyFromDeviceBuffer((float3*)h_v, &d_v_, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy forces from GPU to CPU.
*
* The forces are assumed to be in float3/fvec format (single precision).
*
 * \param[out] h_f      CPU pointer where forces should be copied to.
*/
void LeapFrogCuda::Impl::copyForcesFromGpu(rvec *h_f)
{
copyFromDeviceBuffer((float3*)h_f, &d_f_, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Set the internal GPU-memory x, xprime and v pointers.
*
* Data is not copied. The data are assumed to be in float3/fvec format
* (float3 is used internally, but the data layout should be identical).
*
* \param[in] d_x Pointer to the coordinates for the input (on GPU)
* \param[in] d_xp Pointer to the coordinates for the output (on GPU)
* \param[in] d_v Pointer to the velocities (on GPU)
* \param[in] d_f Pointer to the forces (on GPU)
*/
void LeapFrogCuda::Impl::setXVFPointers(rvec *d_x, rvec *d_xp, rvec *d_v, rvec *d_f)
{
d_x_ = (float3*)d_x;
d_xp_ = (float3*)d_xp;
d_v_ = (float3*)d_v;
d_f_ = (float3*)d_f;
}
LeapFrogCuda::LeapFrogCuda(const int numAtoms)
: impl_(new Impl(numAtoms))
{
}
LeapFrogCuda::~LeapFrogCuda() = default;
void LeapFrogCuda::integrate(const real dt)
{
impl_->integrate(dt);
}
void LeapFrogCuda::setPbc(const t_pbc *pbc)
{
impl_->setPbc(pbc);
}
void LeapFrogCuda::set(const t_mdatoms &md)
{
impl_->set(md);
}
void LeapFrogCuda::copyCoordinatesToGpu(const rvec *h_x)
{
impl_->copyCoordinatesToGpu(h_x);
}
void LeapFrogCuda::copyVelocitiesToGpu(const rvec *h_v)
{
impl_->copyVelocitiesToGpu(h_v);
}
void LeapFrogCuda::copyForcesToGpu(const rvec *h_f)
{
impl_->copyForcesToGpu(h_f);
}
void LeapFrogCuda::copyCoordinatesFromGpu(rvec *h_xp)
{
impl_->copyCoordinatesFromGpu(h_xp);
}
void LeapFrogCuda::copyVelocitiesFromGpu(rvec *h_v)
{
impl_->copyVelocitiesFromGpu(h_v);
}
void LeapFrogCuda::copyForcesFromGpu(rvec *h_f)
{
impl_->copyForcesFromGpu(h_f);
}
void LeapFrogCuda::setXVFPointers(rvec *d_x, rvec *d_xp, rvec *d_v, rvec *d_f)
{
impl_->setXVFPointers(d_x, d_xp, d_v, d_f);
}
} //namespace gmx
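/* Editor's note: a minimal, hypothetical usage sketch for the wrapper class above
 * (the driver code below is illustrative and not part of this file):
 *
 *     gmx::LeapFrogCuda integrator(numAtoms);   // allocates device buffers
 *     integrator.set(mdatoms);                  // upload inverse masses
 *     integrator.setPbc(&pbc);
 *     integrator.copyCoordinatesToGpu(x);
 *     integrator.copyVelocitiesToGpu(v);
 *     integrator.copyForcesToGpu(f);
 *     integrator.integrate(dt);                 // one leap-frog step on the GPU
 *     integrator.copyCoordinatesFromGpu(xp);
 *     integrator.copyVelocitiesFromGpu(v);
 */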
| 88517f4293649ab9899a188de97712d022cf4cda.cu | /*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2019, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
/*! \internal \file
*
* \brief Implements Leap-Frog using CUDA
*
* This file contains implementation of basic Leap-Frog integrator
* using CUDA, including class initialization, data-structures management
* and GPU kernel.
*
* \todo Reconsider naming towards using "gpu" suffix instead of "cuda".
*
* \author Artem Zhmurov <[email protected]>
*
* \ingroup module_mdlib
*/
#include "gmxpre.h"
#include "leapfrog_cuda_impl.h"
#include <assert.h>
#include <stdio.h>
#include <cmath>
#include <algorithm>
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.cuh"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/leapfrog_cuda.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pbcutil/pbc_aiuc_cuda.cuh"
namespace gmx
{
//! Number of CUDA threads in a block
constexpr static int c_threadsPerBlock = 256;
//! Maximum number of threads in a block (for __launch_bounds__)
constexpr static int c_maxThreadsPerBlock = c_threadsPerBlock;
/*! \brief Main kernel for Leap-Frog integrator.
*
* Each GPU thread works with a single particle. Empty declaration is needed to
* avoid "no previous prototype for function" clang warning.
*
* \todo Check if the force should be set to zero here.
* \todo This kernel can also accumulate incidental temperatures for each atom.
*
* \param[in] numAtoms Total number of atoms.
* \param[in] gm_x Coordinates before the timestep
* \param[out] gm_xp Coordinates after the timestep.
* \param[in,out] gm_v Velocities to update.
* \param[in] gm_f Atomic forces.
* \param[in] gm_inverseMasses Reciprocal masses.
* \param[in] dt Timestep.
*/
__launch_bounds__(c_maxThreadsPerBlock)
__global__ void leapfrog_kernel(const int numAtoms,
const float3* __restrict__ gm_x,
float3* __restrict__ gm_xp,
float3* __restrict__ gm_v,
const float3* __restrict__ gm_f,
const float* __restrict__ gm_inverseMasses,
const float dt);
__launch_bounds__(c_maxThreadsPerBlock)
__global__ void leapfrog_kernel(const int numAtoms,
const float3* __restrict__ gm_x,
float3* __restrict__ gm_xp,
float3* __restrict__ gm_v,
const float3* __restrict__ gm_f,
const float* __restrict__ gm_inverseMasses,
const float dt)
{
int threadIndex = blockIdx.x*blockDim.x + threadIdx.x;
if (threadIndex < numAtoms)
{
float3 xi = gm_x[threadIndex];
float3 vi = gm_v[threadIndex];
float3 fi = gm_f[threadIndex];
float imi = gm_inverseMasses[threadIndex];
float imidt = imi*dt;
vi += fi*imidt;
xi += vi*dt;
gm_v[threadIndex] = vi;
gm_xp[threadIndex] = xi;
}
return;
}
/*! \brief Integrate
*
* Integrates the equation of motion using Leap-Frog algorithm.
* Updates d_xp_ and d_v_ fields of this object.
*
* \param[in] dt Timestep
*/
void LeapFrogCuda::Impl::integrate(const real dt)
{
ensureNoPendingCudaError("In CUDA version of Leap-Frog integrator");
KernelLaunchConfig config;
config.blockSize[0] = c_threadsPerBlock;
config.blockSize[1] = 1;
config.blockSize[2] = 1;
config.gridSize[0] = (numAtoms_ + c_threadsPerBlock - 1)/c_threadsPerBlock;
config.sharedMemorySize = 0;
config.stream = stream_;
auto kernelPtr = leapfrog_kernel;
const float3 *d_x = d_x_;
float3 *d_xp = d_xp_;
float3 *d_v = d_v_;
const float3 *d_f = d_f_;
const float *d_inverseMasses = d_inverseMasses_;
const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config,
&numAtoms_,
&d_x, &d_xp,
&d_v,
&d_f,
&d_inverseMasses, &dt);
launchGpuKernel(kernelPtr, config, nullptr, "leapfrog_kernel", kernelArgs);
return;
}
/*! \brief Create Leap-Frog object
*
* \param[in] numAtoms Number of atoms.
*/
LeapFrogCuda::Impl::Impl(int numAtoms)
: numAtoms_(numAtoms)
{
allocateDeviceBuffer(&d_x_, numAtoms, nullptr);
allocateDeviceBuffer(&d_xp_, numAtoms, nullptr);
allocateDeviceBuffer(&d_v_, numAtoms, nullptr);
allocateDeviceBuffer(&d_f_, numAtoms, nullptr);
allocateDeviceBuffer(&d_inverseMasses_, numAtoms, nullptr);
    // TODO When the code is integrated into the schedule, it will be assigned a non-default stream.
stream_ = nullptr;
}
LeapFrogCuda::Impl::~Impl()
{
freeDeviceBuffer(&d_x_);
freeDeviceBuffer(&d_xp_);
freeDeviceBuffer(&d_v_);
freeDeviceBuffer(&d_f_);
freeDeviceBuffer(&d_inverseMasses_);
}
/*! \brief
* Update PBC data.
*
* Converts pbc data from t_pbc into the PbcAiuc format and stores the latter.
*
* \param[in] pbc The PBC data in t_pbc format.
*/
void LeapFrogCuda::Impl::setPbc(const t_pbc *pbc)
{
setPbcAiuc(pbc->ndim_ePBC, pbc->box, &pbcAiuc_);
}
/*! \brief Set the integrator
*
* Copies inverse masses from CPU to GPU.
*
* \param[in] md MD atoms, from which inverse masses are taken.
*/
void LeapFrogCuda::Impl::set(const t_mdatoms &md)
{
copyToDeviceBuffer(&d_inverseMasses_, (float*)md.invmass,
0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy coordinates from CPU to GPU.
*
* The data are assumed to be in float3/fvec format (single precision).
*
* \param[in] h_x CPU pointer where coordinates should be copied from.
*/
void LeapFrogCuda::Impl::copyCoordinatesToGpu(const rvec *h_x)
{
copyToDeviceBuffer(&d_x_, (float3*)h_x, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy velocities from CPU to GPU.
*
* The data are assumed to be in float3/fvec format (single precision).
*
* \param[in] h_v CPU pointer where velocities should be copied from.
*/
void LeapFrogCuda::Impl::copyVelocitiesToGpu(const rvec *h_v)
{
copyToDeviceBuffer(&d_v_, (float3*)h_v, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy forces from CPU to GPU.
*
* The data are assumed to be in float3/fvec format (single precision).
*
* \param[in] h_f CPU pointer where forces should be copied from.
*/
void LeapFrogCuda::Impl::copyForcesToGpu(const rvec *h_f)
{
copyToDeviceBuffer(&d_f_, (float3*)h_f, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy coordinates from GPU to CPU.
*
* The data are assumed to be in float3/fvec format (single precision).
*
* \param[out] h_xp CPU pointer where coordinates should be copied to.
*/
void LeapFrogCuda::Impl::copyCoordinatesFromGpu(rvec *h_xp)
{
copyFromDeviceBuffer((float3*)h_xp, &d_xp_, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy velocities from GPU to CPU.
*
* The velocities are assumed to be in float3/fvec format (single precision).
*
 * \param[out] h_v      CPU pointer where velocities should be copied to.
*/
void LeapFrogCuda::Impl::copyVelocitiesFromGpu(rvec *h_v)
{
copyFromDeviceBuffer((float3*)h_v, &d_v_, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Copy forces from GPU to CPU.
*
* The forces are assumed to be in float3/fvec format (single precision).
*
 * \param[out] h_f      CPU pointer where forces should be copied to.
*/
void LeapFrogCuda::Impl::copyForcesFromGpu(rvec *h_f)
{
copyFromDeviceBuffer((float3*)h_f, &d_f_, 0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}
/*! \brief
* Set the internal GPU-memory x, xprime and v pointers.
*
* Data is not copied. The data are assumed to be in float3/fvec format
* (float3 is used internally, but the data layout should be identical).
*
* \param[in] d_x Pointer to the coordinates for the input (on GPU)
* \param[in] d_xp Pointer to the coordinates for the output (on GPU)
* \param[in] d_v Pointer to the velocities (on GPU)
* \param[in] d_f Pointer to the forces (on GPU)
*/
void LeapFrogCuda::Impl::setXVFPointers(rvec *d_x, rvec *d_xp, rvec *d_v, rvec *d_f)
{
d_x_ = (float3*)d_x;
d_xp_ = (float3*)d_xp;
d_v_ = (float3*)d_v;
d_f_ = (float3*)d_f;
}
LeapFrogCuda::LeapFrogCuda(const int numAtoms)
: impl_(new Impl(numAtoms))
{
}
LeapFrogCuda::~LeapFrogCuda() = default;
void LeapFrogCuda::integrate(const real dt)
{
impl_->integrate(dt);
}
void LeapFrogCuda::setPbc(const t_pbc *pbc)
{
impl_->setPbc(pbc);
}
void LeapFrogCuda::set(const t_mdatoms &md)
{
impl_->set(md);
}
void LeapFrogCuda::copyCoordinatesToGpu(const rvec *h_x)
{
impl_->copyCoordinatesToGpu(h_x);
}
void LeapFrogCuda::copyVelocitiesToGpu(const rvec *h_v)
{
impl_->copyVelocitiesToGpu(h_v);
}
void LeapFrogCuda::copyForcesToGpu(const rvec *h_f)
{
impl_->copyForcesToGpu(h_f);
}
void LeapFrogCuda::copyCoordinatesFromGpu(rvec *h_xp)
{
impl_->copyCoordinatesFromGpu(h_xp);
}
void LeapFrogCuda::copyVelocitiesFromGpu(rvec *h_v)
{
impl_->copyVelocitiesFromGpu(h_v);
}
void LeapFrogCuda::copyForcesFromGpu(rvec *h_f)
{
impl_->copyForcesFromGpu(h_f);
}
void LeapFrogCuda::setXVFPointers(rvec *d_x, rvec *d_xp, rvec *d_v, rvec *d_f)
{
impl_->setXVFPointers(d_x, d_xp, d_v, d_f);
}
} //namespace gmx
|
f11b3c8ef23f9699ae88212cfaf33cf471f9ef8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/utils/math/transpose.h"
#include <algorithm>
#include <functional>
#include <numeric>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
namespace {
constexpr int kTileDim = 32;
constexpr int kBlockRows = 8;
// Splits the original matrix into submatrices with size 32 * 32.
// Each block transposes one submatrix by loading it into shared memory.
// Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
template <typename TIndex, typename TData>
__global__ void BatchTranspose2DCUDAKernel(
const TIndex N,
const TIndex H,
const TIndex W,
const TIndex dh,
const TIndex dw,
const TData* X,
TData* Y) {
__shared__ TData tile[kTileDim][kTileDim + 1];
const TIndex n = blockIdx.x / (dh * dw);
const TIndex k = blockIdx.x % (dh * dw);
const TIndex r = k / dw;
const TIndex c = k % dw;
const TIndex offset = n * H * W;
int x = c * kTileDim + threadIdx.x;
int y = r * kTileDim + threadIdx.y;
if (x < W) {
for (int i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) {
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
tile[threadIdx.y + i][threadIdx.x] = __ldg(X + offset + (y + i) * W + x);
#else
tile[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x];
#endif
}
}
__syncthreads();
x = r * kTileDim + threadIdx.x;
y = c * kTileDim + threadIdx.y;
if (x < H) {
for (int i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) {
Y[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i];
}
}
}
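// Editor's note (illustrative): the "+ 1" padding in tile[kTileDim][kTileDim + 1]
// staggers rows across shared-memory banks. Without it, the transposed read
// tile[threadIdx.x][threadIdx.y + i] would have a stride of exactly 32 floats, so
// every thread in a warp would hit the same bank; the 33-float row stride spreads
// the accesses over distinct banks and keeps the transpose free of bank conflicts.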
template <typename TIndex, typename TData>
void BatchTranspose2DCUDAImpl(
const TIndex N,
const TIndex H,
const TIndex W,
const TData* X,
TData* Y,
CUDAContext* context) {
const TIndex dh = DivUp<TIndex>(H, kTileDim);
const TIndex dw = DivUp<TIndex>(W, kTileDim);
hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<TIndex, TData>)
, dim3(N * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, context->cuda_stream(),
N, H, W, dh, dw, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
#define DELEGATE_TRANSPOSE_2D_CUDA_IMPL(TIndex, TData, CuBLASFunc) \
template <> \
void BatchTranspose2DCUDAImpl<TIndex, TData>( \
const TIndex N, \
const TIndex H, \
const TIndex W, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N == 1) { \
const TData kAlpha = TData(1); \
const TData kBeta = TData(0); \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(CuBLASFunc( \
context->cublas_handle(), \
HIPBLAS_OP_T, \
HIPBLAS_OP_N, \
H, \
W, \
&kAlpha, \
X, \
W, \
&kBeta, \
Y, \
H, \
Y, \
H)); \
} else { \
const TIndex dh = DivUp<TIndex>(H, kTileDim); \
const TIndex dw = DivUp<TIndex>(W, kTileDim); \
hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<TIndex, TData>) \
, dim3(N * dh * dw), \
dim3(kTileDim, kBlockRows), \
0, \
context->cuda_stream(), N, H, W, dh, dw, X, Y); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
}
DELEGATE_TRANSPOSE_2D_CUDA_IMPL(std::int32_t, float, hipblasSgeam)
DELEGATE_TRANSPOSE_2D_CUDA_IMPL(std::int64_t, float, hipblasSgeam)
DELEGATE_TRANSPOSE_2D_CUDA_IMPL(std::int32_t, double, hipblasDgeam)
DELEGATE_TRANSPOSE_2D_CUDA_IMPL(std::int64_t, double, hipblasDgeam)
#undef DELEGATE_TRANSPOSE_2D_CUDA_IMPL
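// Editor's note (illustrative): the N == 1 specialization above relies on BLAS geam,
// which computes C = alpha * op(A) + beta * op(B). With alpha = 1, beta = 0 and
// op(A) = A^T (HIPBLAS_OP_T / CUBLAS_OP_T) this reduces to C = A^T, i.e. a plain
// out-of-place transpose handled by a tuned vendor kernel, while batched inputs go
// through the tiled kernel above.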
template <typename TIndex, typename TData, int D>
__global__ void TransposeCUDAKernel(
const TIndex size,
const SimpleArray<TIndex, D> X_strides,
const SimpleArray<TIndex, D> Y_dims,
const TData* X,
TData* Y) {
const int Y_index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (Y_index < size) {
TIndex X_index = 0;
TIndex v = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += v % Y_dims.data[i] * X_strides.data[i];
v /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
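// Editor's note (illustrative): the loop above "unravels" the flat output index:
// Y_index is decomposed in mixed radix over Y_dims (innermost dimension first via
// the repeated % and /), and each extracted coordinate is multiplied by the
// corresponding input stride prepared by ComputeTransposedStrides, so X_index ends
// up addressing the element that lands at Y_index after the axes permutation.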
template <typename TIndex, typename TData, int D>
void TransposeCUDAImpl(
const TIndex* dims,
const int* axes,
const TData* X,
TData* Y,
CUDAContext* context) {
SimpleArray<TIndex, D> X_strides;
SimpleArray<TIndex, D> Y_dims;
utils::ComputeTransposedStrides<TIndex>(D, dims, axes, X_strides.data);
TIndex size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
size *= dims[i];
}
const TIndex M = DivUp<TIndex>(size, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( TransposeCUDAKernel<TIndex, TData, D>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
size, X_strides, Y_dims, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Transpose<TIndex, TData, CUDAContext>( \
const int ndim, \
const TIndex* dims, \
const int* axes, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
const TIndex size = std::accumulate( \
dims, dims + ndim, TIndex(1), std::multiplies<TIndex>()); \
if (size == 0) { \
return; \
} \
if (utils::IsIdentityPermutation(ndim, axes)) { \
context->template CopySameDevice<TData>(size, X, Y); \
return; \
} \
if (utils::IsBatchTranspose2D(ndim, axes)) { \
const int H = dims[ndim - 2]; \
const int W = dims[ndim - 1]; \
const int N = size / (H * W); \
BatchTranspose2DCUDAImpl<TIndex, TData>(N, H, W, X, Y, context); \
return; \
} \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2( \
ndim, TransposeCUDAImpl, TIndex, TData, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int32_t, float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int64_t, float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int32_t, double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int64_t, double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int64_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int32_t, std::int64_t)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
#define CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(T) \
template <> \
CAFFE2_CUDA_EXPORT void NCHW2NHWC<T, CUDAContext>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
T* Y, \
CUDAContext* context) { \
BatchTranspose2DCUDAImpl<int, T>(N, C, HxW, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(float)
#undef CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC
#define CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(T) \
template <> \
CAFFE2_CUDA_EXPORT void NHWC2NCHW<T, CUDAContext>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
T* Y, \
CUDAContext* context) { \
BatchTranspose2DCUDAImpl<int, T>(N, HxW, C, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(float)
#undef CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW
} // namespace math
} // namespace caffe2
| f11b3c8ef23f9699ae88212cfaf33cf471f9ef8f.cu | #include "caffe2/utils/math/transpose.h"
#include <algorithm>
#include <functional>
#include <numeric>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
namespace {
constexpr int kTileDim = 32;
constexpr int kBlockRows = 8;
// Splits the original matrix into submatrices with size 32 * 32.
// Each block transposes one submatrix by loading it into shared memory.
// Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
template <typename TIndex, typename TData>
__global__ void BatchTranspose2DCUDAKernel(
const TIndex N,
const TIndex H,
const TIndex W,
const TIndex dh,
const TIndex dw,
const TData* X,
TData* Y) {
__shared__ TData tile[kTileDim][kTileDim + 1];
const TIndex n = blockIdx.x / (dh * dw);
const TIndex k = blockIdx.x % (dh * dw);
const TIndex r = k / dw;
const TIndex c = k % dw;
const TIndex offset = n * H * W;
int x = c * kTileDim + threadIdx.x;
int y = r * kTileDim + threadIdx.y;
if (x < W) {
for (int i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) {
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
tile[threadIdx.y + i][threadIdx.x] = __ldg(X + offset + (y + i) * W + x);
#else
tile[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x];
#endif
}
}
__syncthreads();
x = r * kTileDim + threadIdx.x;
y = c * kTileDim + threadIdx.y;
if (x < H) {
for (int i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) {
Y[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i];
}
}
}
template <typename TIndex, typename TData>
void BatchTranspose2DCUDAImpl(
const TIndex N,
const TIndex H,
const TIndex W,
const TData* X,
TData* Y,
CUDAContext* context) {
const TIndex dh = DivUp<TIndex>(H, kTileDim);
const TIndex dw = DivUp<TIndex>(W, kTileDim);
BatchTranspose2DCUDAKernel<TIndex, TData>
<<<N * dh * dw, dim3(kTileDim, kBlockRows), 0, context->cuda_stream()>>>(
N, H, W, dh, dw, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
#define DELEGATE_TRANSPOSE_2D_CUDA_IMPL(TIndex, TData, CuBLASFunc) \
template <> \
void BatchTranspose2DCUDAImpl<TIndex, TData>( \
const TIndex N, \
const TIndex H, \
const TIndex W, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N == 1) { \
const TData kAlpha = TData(1); \
const TData kBeta = TData(0); \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(CuBLASFunc( \
context->cublas_handle(), \
CUBLAS_OP_T, \
CUBLAS_OP_N, \
H, \
W, \
&kAlpha, \
X, \
W, \
&kBeta, \
Y, \
H, \
Y, \
H)); \
} else { \
const TIndex dh = DivUp<TIndex>(H, kTileDim); \
const TIndex dw = DivUp<TIndex>(W, kTileDim); \
BatchTranspose2DCUDAKernel<TIndex, TData> \
<<<N * dh * dw, \
dim3(kTileDim, kBlockRows), \
0, \
context->cuda_stream()>>>(N, H, W, dh, dw, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
DELEGATE_TRANSPOSE_2D_CUDA_IMPL(std::int32_t, float, cublasSgeam)
DELEGATE_TRANSPOSE_2D_CUDA_IMPL(std::int64_t, float, cublasSgeam)
DELEGATE_TRANSPOSE_2D_CUDA_IMPL(std::int32_t, double, cublasDgeam)
DELEGATE_TRANSPOSE_2D_CUDA_IMPL(std::int64_t, double, cublasDgeam)
#undef DELEGATE_TRANSPOSE_2D_CUDA_IMPL
template <typename TIndex, typename TData, int D>
__global__ void TransposeCUDAKernel(
const TIndex size,
const SimpleArray<TIndex, D> X_strides,
const SimpleArray<TIndex, D> Y_dims,
const TData* X,
TData* Y) {
const int Y_index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (Y_index < size) {
TIndex X_index = 0;
TIndex v = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += v % Y_dims.data[i] * X_strides.data[i];
v /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename TIndex, typename TData, int D>
void TransposeCUDAImpl(
const TIndex* dims,
const int* axes,
const TData* X,
TData* Y,
CUDAContext* context) {
SimpleArray<TIndex, D> X_strides;
SimpleArray<TIndex, D> Y_dims;
utils::ComputeTransposedStrides<TIndex>(D, dims, axes, X_strides.data);
TIndex size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
size *= dims[i];
}
const TIndex M = DivUp<TIndex>(size, CAFFE_CUDA_NUM_THREADS);
TransposeCUDAKernel<TIndex, TData, D>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
size, X_strides, Y_dims, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Transpose<TIndex, TData, CUDAContext>( \
const int ndim, \
const TIndex* dims, \
const int* axes, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
const TIndex size = std::accumulate( \
dims, dims + ndim, TIndex(1), std::multiplies<TIndex>()); \
if (size == 0) { \
return; \
} \
if (utils::IsIdentityPermutation(ndim, axes)) { \
context->template CopySameDevice<TData>(size, X, Y); \
return; \
} \
if (utils::IsBatchTranspose2D(ndim, axes)) { \
const int H = dims[ndim - 2]; \
const int W = dims[ndim - 1]; \
const int N = size / (H * W); \
BatchTranspose2DCUDAImpl<TIndex, TData>(N, H, W, X, Y, context); \
return; \
} \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2( \
ndim, TransposeCUDAImpl, TIndex, TData, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int32_t, float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int64_t, float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int32_t, double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int64_t, double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int64_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int32_t, std::int64_t)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
#define CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(T) \
template <> \
CAFFE2_CUDA_EXPORT void NCHW2NHWC<T, CUDAContext>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
T* Y, \
CUDAContext* context) { \
BatchTranspose2DCUDAImpl<int, T>(N, C, HxW, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(float)
#undef CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC
#define CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(T) \
template <> \
CAFFE2_CUDA_EXPORT void NHWC2NCHW<T, CUDAContext>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
T* Y, \
CUDAContext* context) { \
BatchTranspose2DCUDAImpl<int, T>(N, HxW, C, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(float)
#undef CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW
} // namespace math
} // namespace caffe2
|
79466fe3bc257bd7416c6d169d23c28fc03e109d.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/**
* Get parameters from STDIN.
*/
static void read_from_stdin(int *gid, int *n)
{
char *s, buf[1024];
fgets(buf, 1023, stdin);
if ((s = strchr(buf, '#')) != NULL) *s = '\0';
*gid = atoi(buf);
fgets(buf, 1023, stdin);
if ((s = strchr(buf, '#')) != NULL) *s = '\0';
*n = atoi(buf);
}
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int main(int argc, char **argv)
{
int gid, numElements;
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
read_from_stdin(&gid, &numElements);
err = hipSetDevice(gid);
if (err != hipSuccess) {
printf("!!! Cannot select GPU with device ID = %d\n", gid);
exit(1);
}
printf("Choose GPU with device ID = %d\n", gid);
// Print the vector length to be used, and compute its size
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the input vector A in the host (CPU)
float *h_A = (float *)malloc(size);
// Allocate the input vector B in the host
float *h_B = (float *)malloc(size);
// Allocate the output vector C in the host
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors with random numbers with uniform distribution in (0,1)
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the input vector A in device
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the input vector B in device
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the output vector C in device
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the input vectors A and B from the host memory to the device memory
printf("Copy input vectors from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
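    // The line above is the usual ceiling division: it rounds up so every element is
    // covered by at least one thread. As a purely illustrative example (not values used
    // by this sample), numElements = 1000 with threadsPerBlock = 256 gives
    // (1000 + 255) / 256 = 4 blocks, i.e. 1024 threads; the bounds check inside
    // vectorAdd simply ignores the 24 surplus threads.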
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
| 79466fe3bc257bd7416c6d169d23c28fc03e109d.cu | /**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* Get parameters from STDIN.
*/
static void read_from_stdin(int *gid, int *n)
{
char *s, buf[1024];
fgets(buf, 1023, stdin);
if ((s = strchr(buf, '#')) != NULL) *s = '\0';
*gid = atoi(buf);
fgets(buf, 1023, stdin);
if ((s = strchr(buf, '#')) != NULL) *s = '\0';
*n = atoi(buf);
}
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int main(int argc, char **argv)
{
int gid, numElements;
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
read_from_stdin(&gid, &numElements);
err = cudaSetDevice(gid);
if (err != cudaSuccess) {
printf("!!! Cannot select GPU with device ID = %d\n", gid);
exit(1);
}
printf("Choose GPU with device ID = %d\n", gid);
// Print the vector length to be used, and compute its size
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the input vector A in the host (CPU)
float *h_A = (float *)malloc(size);
// Allocate the input vector B in the host
float *h_B = (float *)malloc(size);
// Allocate the output vector C in the host
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
    // Initialize the host input vectors with random numbers drawn from a uniform distribution in (0,1)
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the input vector A in device
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the input vector B in device
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the output vector C in device
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the input vectors A and B from the host memory to the device memory
printf("Copy input vectors from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
|
4d90a204f1e93cd074cb67b42bac00011c1f3052.hip | // !!! This is a file automatically generated by hipify!!!
// Andrew Gloster
// May 2018
// Copyright 2018 Andrew Gloster
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*! \file custenCreateDestroy2DYpFun.cu
Functions to create and destroy the cuSten_t that is used to give input to the compute kernels.
2D y direction, periodic
*/
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
#include <iostream>
// ---------------------------------------------------------------------
// Custom libraries and headers
// ---------------------------------------------------------------------
#include "cuSten_struct_type.h"
#include "cuSten_struct_functions.h"
#include "../util/util.h"
// ---------------------------------------------------------------------
// Function to create the struct
// ---------------------------------------------------------------------
/*! \fun void cuStenCreate2DYp
\brief Function to set up cuSten_t
	\param pt_cuSten Pointer to cuSten type provided by user
	\param deviceNum Device number of the GPU the struct will run on
	\param numTiles Number of tiles to divide the data into
\param nx Total number of points in the x direction
\param ny Total number of points in the y direction
\param BLOCK_X Size of thread block in the x direction
\param BLOCK_Y Size of thread block in the y direction
\param dataOutput Pointer to data output by the function
\param dataInput Pointer to data input to the function
\param weights Pointer to the weights for the stencil
	\param numSten Total number of points in the stencil in the y direction
\param numStenTop Number of points on the top of the stencil
\param numStenBottom Number of points on the bottom of the stencil
*/
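// A minimal usage sketch of the three functions in this file (illustrative only; the
// sizes, block shape and stencil width below are made-up assumptions, and the device
// buffers are assumed to be allocated and filled by the caller):
//
//   cuSten_t stencil;
//   double *d_in, *d_out, *d_weights; // device arrays of size nx * ny and numSten
//   // 3-point stencil in y: 1 point above, 1 below, periodic wrap, 2 tiles
//   cuStenCreate2DYp(&stencil, 0, 2, 512, 512, 32, 32,
//                    d_out, d_in, d_weights, 3, 1, 1);
//   // ... run the compute kernel, then prepare the next time step ...
//   cuStenSwap2DYp(&stencil, d_in);
//   cuStenDestroy2DYp(&stencil);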
void cuStenCreate2DYp(
cuSten_t* pt_cuSten,
int deviceNum,
int numTiles,
int nx,
int ny,
int BLOCK_X,
int BLOCK_Y,
double* dataOutput,
double* dataInput,
double* weights,
int numSten,
int numStenTop,
int numStenBottom
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set the device number associated with the struct
pt_cuSten->deviceNum = deviceNum;
// Set the number of streams
pt_cuSten->numStreams = 3;
// Set the number of tiles
pt_cuSten->numTiles = numTiles;
	// Set the number of points in x on the device
pt_cuSten->nx = nx;
	// Set the number of points in y on the device
pt_cuSten->ny = ny;
// Number of threads in x on the device
pt_cuSten->BLOCK_X = BLOCK_X;
// Number of threads in y on the device
pt_cuSten->BLOCK_Y = BLOCK_Y;
// Set current active compute device
hipSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
	// Create memory for the streams
pt_cuSten->streams = (hipStream_t*)malloc(pt_cuSten->numStreams * sizeof(hipStream_t*));
// Create the streams
for (int st = 0; st < pt_cuSten->numStreams; st++)
{
hipStreamCreate(&pt_cuSten->streams[st]);
sprintf(msgStringBuffer, "Creating stream %d on GPU %d", st, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
	// Create memory for the events
pt_cuSten->events = (hipEvent_t*)malloc(2 * sizeof(hipEvent_t*));
// Create the events
for (int ev = 0; ev < 2; ev++)
{
hipEventCreate(&pt_cuSten->events[ev]);
sprintf(msgStringBuffer, "Creating event %d on GPU %d", ev, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Set number of points in the stencil
pt_cuSten->numSten = numSten;
	// Set number of points on the top of the stencil
pt_cuSten->numStenTop = numStenTop;
	// Set number of points on the bottom of the stencil
pt_cuSten->numStenBottom = numStenBottom;
// Set local block array sizes - x direction
pt_cuSten->nxLocal = pt_cuSten->BLOCK_X;
	// Set local block array sizes - y direction
pt_cuSten->nyLocal = pt_cuSten->BLOCK_Y + pt_cuSten->numStenTop + pt_cuSten->numStenBottom;
// Set the amount of shared memory required
pt_cuSten->mem_shared = (pt_cuSten->nxLocal * pt_cuSten->nyLocal) * sizeof(double) + pt_cuSten->numSten * sizeof(double);
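	// Worked example of the shared memory size above (illustrative numbers only): with
	// BLOCK_X = 32, BLOCK_Y = 32 and a 3-point stencil (numStenTop = numStenBottom = 1),
	// nxLocal = 32 and nyLocal = 34, so mem_shared = 32 * 34 * 8 + 3 * 8 = 8728 bytes:
	// the local tile plus its top/bottom halo rows, followed by the stencil weights.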
// Find number of points per tile
pt_cuSten->nyTile = pt_cuSten->ny / pt_cuSten->numTiles;
// Set the grid up
pt_cuSten->xGrid = (pt_cuSten->nx % pt_cuSten->BLOCK_X == 0) ? (pt_cuSten->nx / pt_cuSten->BLOCK_X) : (pt_cuSten->nx / pt_cuSten->BLOCK_X + 1);
pt_cuSten->yGrid = (pt_cuSten->nyTile % pt_cuSten->BLOCK_Y == 0) ? (pt_cuSten->nyTile / pt_cuSten->BLOCK_Y) : (pt_cuSten->nyTile / pt_cuSten->BLOCK_Y + 1);
// Set the device weights pointer
pt_cuSten->weights = weights;
// Allocate the pointers for each input tile
	pt_cuSten->dataInput = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
// Allocate the pointers for each output tile
	pt_cuSten->dataOutput = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
// // Tile offset index
int offset = pt_cuSten->nx * pt_cuSten->nyTile;
// // Match the pointers to the data
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Set the input data
pt_cuSten->dataInput[tile] = &dataInput[tile * offset];
// Set the output data
pt_cuSten->dataOutput[tile] = &dataOutput[tile * offset];
}
// Create cases depending on what tile numbers - Periodic
// 1 tile
// 2 tiles
// 3 or greater
// Allocate top boundary memory
	pt_cuSten->boundaryTop = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
// Allocate bottom boundary memory
	pt_cuSten->boundaryBottom = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
switch(pt_cuSten->numTiles)
{
// One tile only requires single top and bottom to be set
case 1:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[0];
break;
// Two tiles requires a special case of only setting two tiles
case 2:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
pt_cuSten->boundaryTop[1] = &dataInput[(pt_cuSten->nyTile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[1] = &dataInput[0];
break;
// Default case has interiors, so set the top tile, then loop over interior, then set the bottom tile
default:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
for (int tile = 1; tile < pt_cuSten->numTiles - 1; tile++)
{
pt_cuSten->boundaryTop[tile] = &dataInput[(pt_cuSten->nyTile * tile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[tile] = &dataInput[(pt_cuSten->nyTile * (tile + 1)) * pt_cuSten->nx];
}
pt_cuSten->boundaryTop[pt_cuSten->numTiles - 1] = &dataInput[(pt_cuSten->nyTile * (pt_cuSten->numTiles - 1) - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[pt_cuSten->numTiles - 1] = &dataInput[0];
break;
}
// Number of points in top boundary data
pt_cuSten->numBoundaryTop = pt_cuSten->numStenTop * pt_cuSten->nx;
// Number of points in bottom boundary data
pt_cuSten->numBoundaryBottom = pt_cuSten->numStenBottom * pt_cuSten->nx;
}
// ---------------------------------------------------------------------
// Swap pointers
// ---------------------------------------------------------------------
/*! \fun void cuStenSwap2DYp
\brief Function to swap pointers necessary for timestepping
\param pt_cuSten Pointer to cuSten type provided by user
	\param dataInput Pointer to the data used as input on the next compute
*/
void cuStenSwap2DYp(
cuSten_t* pt_cuSten,
double* dataInput
)
{
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Swap the input and output data
std::swap(pt_cuSten->dataInput[tile], pt_cuSten->dataOutput[tile]);
// Update the boundary data
switch(pt_cuSten->numTiles)
{
// One tile only requires single top and bottom to be set
case 1:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[0];
break;
// Two tiles requires a special case of only setting two tiles
case 2:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
pt_cuSten->boundaryTop[1] = &dataInput[(pt_cuSten->nyTile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[1] = &dataInput[0];
break;
// Default case has interiors, so set the top tile, then loop over interior, then set the bottom tile
default:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
for (int tile = 1; tile < pt_cuSten->numTiles - 1; tile++)
{
pt_cuSten->boundaryTop[tile] = &dataInput[(pt_cuSten->nyTile * tile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[tile] = &dataInput[(pt_cuSten->nyTile * (tile + 1)) * pt_cuSten->nx];
}
pt_cuSten->boundaryTop[pt_cuSten->numTiles - 1] = &dataInput[(pt_cuSten->nyTile * (pt_cuSten->numTiles - 1) - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[pt_cuSten->numTiles - 1] = &dataInput[0];
break;
}
}
}
// ---------------------------------------------------------------------
// Function to destroy the struct
// ---------------------------------------------------------------------
/*! \fun void cuStenDestroy2DYp
\brief Function to destroy data associated with cuSten_t
\param pt_cuSten Pointer to cuSten type provided by user
*/
void cuStenDestroy2DYp(
cuSten_t* pt_cuSten
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set current active compute device
hipSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
// Destroy the streams
for (int st = 0; st < pt_cuSten->numStreams; st++)
{
hipStreamDestroy(pt_cuSten->streams[st]);
sprintf(msgStringBuffer, "Destroying stream %d on GPU %d", st, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Free the main memory
free(pt_cuSten->streams);
	// Destroy the events
for (int ev = 0; ev < 2; ev++)
{
hipEventDestroy(pt_cuSten->events[ev]);
sprintf(msgStringBuffer, "Destroying event %d on GPU %d", ev, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Free the main memory
free(pt_cuSten->events);
// Free the pointers for each input tile
free(pt_cuSten->dataInput);
// Free the pointers for each output tile
free(pt_cuSten->dataOutput);
// Free the top boundary tile pointers
free(pt_cuSten->boundaryTop);
// Free the bottom boundary tile pointers
free(pt_cuSten->boundaryBottom);
}
// ---------------------------------------------------------------------
// End of file
// --------------------------------------------------------------------- | 4d90a204f1e93cd074cb67b42bac00011c1f3052.cu | // Andrew Gloster
// May 2018
// Copyright 2018 Andrew Gloster
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*! \file custenCreateDestroy2DYpFun.cu
Functions to create and destroy the cuSten_t that is used to give input to the compute kernels.
2D y direction, periodic
*/
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
#include <iostream>
// ---------------------------------------------------------------------
// Custom libraries and headers
// ---------------------------------------------------------------------
#include "cuSten_struct_type.h"
#include "cuSten_struct_functions.h"
#include "../util/util.h"
// ---------------------------------------------------------------------
// Function to create the struct
// ---------------------------------------------------------------------
/*! \fun void cuStenCreate2DYp
\brief Function to set up cuSten_t
	\param pt_cuSten Pointer to cuSten type provided by user
	\param deviceNum Device number of the GPU the struct will run on
	\param numTiles Number of tiles to divide the data into
\param nx Total number of points in the x direction
\param ny Total number of points in the y direction
\param BLOCK_X Size of thread block in the x direction
\param BLOCK_Y Size of thread block in the y direction
\param dataOutput Pointer to data output by the function
\param dataInput Pointer to data input to the function
\param weights Pointer to the weights for the stencil
	\param numSten Total number of points in the stencil in the y direction
\param numStenTop Number of points on the top of the stencil
\param numStenBottom Number of points on the bottom of the stencil
*/
void cuStenCreate2DYp(
cuSten_t* pt_cuSten,
int deviceNum,
int numTiles,
int nx,
int ny,
int BLOCK_X,
int BLOCK_Y,
double* dataOutput,
double* dataInput,
double* weights,
int numSten,
int numStenTop,
int numStenBottom
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set the device number associated with the struct
pt_cuSten->deviceNum = deviceNum;
// Set the number of streams
pt_cuSten->numStreams = 3;
// Set the number of tiles
pt_cuSten->numTiles = numTiles;
	// Set the number of points in x on the device
pt_cuSten->nx = nx;
	// Set the number of points in y on the device
pt_cuSten->ny = ny;
// Number of threads in x on the device
pt_cuSten->BLOCK_X = BLOCK_X;
// Number of threads in y on the device
pt_cuSten->BLOCK_Y = BLOCK_Y;
// Set current active compute device
cudaSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
	// Create memory for the streams
pt_cuSten->streams = (cudaStream_t*)malloc(pt_cuSten->numStreams * sizeof(cudaStream_t*));
// Create the streams
for (int st = 0; st < pt_cuSten->numStreams; st++)
{
cudaStreamCreate(&pt_cuSten->streams[st]);
sprintf(msgStringBuffer, "Creating stream %d on GPU %d", st, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Create memeory for the events
pt_cuSten->events = (cudaEvent_t*)malloc(2 * sizeof(cudaEvent_t*));
// Create the events
for (int ev = 0; ev < 2; ev++)
{
cudaEventCreate(&pt_cuSten->events[ev]);
sprintf(msgStringBuffer, "Creating event %d on GPU %d", ev, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Set number of points in the stencil
pt_cuSten->numSten = numSten;
	// Set number of points on the top of the stencil
pt_cuSten->numStenTop = numStenTop;
	// Set number of points on the bottom of the stencil
pt_cuSten->numStenBottom = numStenBottom;
// Set local block array sizes - x direction
pt_cuSten->nxLocal = pt_cuSten->BLOCK_X;
	// Set local block array sizes - y direction
pt_cuSten->nyLocal = pt_cuSten->BLOCK_Y + pt_cuSten->numStenTop + pt_cuSten->numStenBottom;
// Set the amount of shared memory required
pt_cuSten->mem_shared = (pt_cuSten->nxLocal * pt_cuSten->nyLocal) * sizeof(double) + pt_cuSten->numSten * sizeof(double);
// Find number of points per tile
pt_cuSten->nyTile = pt_cuSten->ny / pt_cuSten->numTiles;
// Set the grid up
pt_cuSten->xGrid = (pt_cuSten->nx % pt_cuSten->BLOCK_X == 0) ? (pt_cuSten->nx / pt_cuSten->BLOCK_X) : (pt_cuSten->nx / pt_cuSten->BLOCK_X + 1);
pt_cuSten->yGrid = (pt_cuSten->nyTile % pt_cuSten->BLOCK_Y == 0) ? (pt_cuSten->nyTile / pt_cuSten->BLOCK_Y) : (pt_cuSten->nyTile / pt_cuSten->BLOCK_Y + 1);
// Set the device weights pointer
pt_cuSten->weights = weights;
// Allocate the pointers for each input tile
	pt_cuSten->dataInput = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
// Allocate the pointers for each output tile
	pt_cuSten->dataOutput = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
// // Tile offset index
int offset = pt_cuSten->nx * pt_cuSten->nyTile;
// // Match the pointers to the data
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Set the input data
pt_cuSten->dataInput[tile] = &dataInput[tile * offset];
// Set the output data
pt_cuSten->dataOutput[tile] = &dataOutput[tile * offset];
}
// Create cases depending on what tile numbers - Periodic
// 1 tile
// 2 tiles
// 3 or greater
// Allocate top boundary memory
	pt_cuSten->boundaryTop = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
// Allocate bottom boundary memory
	pt_cuSten->boundaryBottom = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
switch(pt_cuSten->numTiles)
{
// One tile only requires single top and bottom to be set
case 1:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[0];
break;
// Two tiles requires a special case of only setting two tiles
case 2:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
pt_cuSten->boundaryTop[1] = &dataInput[(pt_cuSten->nyTile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[1] = &dataInput[0];
break;
// Default case has interiors, so set the top tile, then loop over interior, then set the bottom tile
default:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
for (int tile = 1; tile < pt_cuSten->numTiles - 1; tile++)
{
pt_cuSten->boundaryTop[tile] = &dataInput[(pt_cuSten->nyTile * tile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[tile] = &dataInput[(pt_cuSten->nyTile * (tile + 1)) * pt_cuSten->nx];
}
pt_cuSten->boundaryTop[pt_cuSten->numTiles - 1] = &dataInput[(pt_cuSten->nyTile * (pt_cuSten->numTiles - 1) - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[pt_cuSten->numTiles - 1] = &dataInput[0];
break;
}
// Number of points in top boundary data
pt_cuSten->numBoundaryTop = pt_cuSten->numStenTop * pt_cuSten->nx;
// Number of points in bottom boundary data
pt_cuSten->numBoundaryBottom = pt_cuSten->numStenBottom * pt_cuSten->nx;
}
// ---------------------------------------------------------------------
// Swap pointers
// ---------------------------------------------------------------------
/*! \fun void cuStenSwap2DYp
\brief Function to swap pointers necessary for timestepping
\param pt_cuSten Pointer to cuSten type provided by user
	\param dataInput Pointer to the data used as input on the next compute
*/
void cuStenSwap2DYp(
cuSten_t* pt_cuSten,
double* dataInput
)
{
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Swap the input and output data
std::swap(pt_cuSten->dataInput[tile], pt_cuSten->dataOutput[tile]);
// Update the boundary data
switch(pt_cuSten->numTiles)
{
// One tile only requires single top and bottom to be set
case 1:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[0];
break;
// Two tiles requires a special case of only setting two tiles
case 2:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
pt_cuSten->boundaryTop[1] = &dataInput[(pt_cuSten->nyTile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[1] = &dataInput[0];
break;
// Default case has interiors, so set the top tile, then loop over interior, then set the bottom tile
default:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
for (int tile = 1; tile < pt_cuSten->numTiles - 1; tile++)
{
pt_cuSten->boundaryTop[tile] = &dataInput[(pt_cuSten->nyTile * tile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[tile] = &dataInput[(pt_cuSten->nyTile * (tile + 1)) * pt_cuSten->nx];
}
pt_cuSten->boundaryTop[pt_cuSten->numTiles - 1] = &dataInput[(pt_cuSten->nyTile * (pt_cuSten->numTiles - 1) - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[pt_cuSten->numTiles - 1] = &dataInput[0];
break;
}
}
}
// ---------------------------------------------------------------------
// Function to destroy the struct
// ---------------------------------------------------------------------
/*! \fun void cuStenDestroy2DYp
\brief Function to destroy data associated with cuSten_t
\param pt_cuSten Pointer to cuSten type provided by user
*/
void cuStenDestroy2DYp(
cuSten_t* pt_cuSten
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set current active compute device
cudaSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
// Destroy the streams
for (int st = 0; st < pt_cuSten->numStreams; st++)
{
cudaStreamDestroy(pt_cuSten->streams[st]);
sprintf(msgStringBuffer, "Destroying stream %d on GPU %d", st, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Free the main memory
free(pt_cuSten->streams);
	// Destroy the events
for (int ev = 0; ev < 2; ev++)
{
cudaEventDestroy(pt_cuSten->events[ev]);
sprintf(msgStringBuffer, "Destroying event %d on GPU %d", ev, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Free the main memory
free(pt_cuSten->events);
// Free the pointers for each input tile
free(pt_cuSten->dataInput);
// Free the pointers for each output tile
free(pt_cuSten->dataOutput);
// Free the top boundary tile pointers
free(pt_cuSten->boundaryTop);
// Free the bottom boundary tile pointers
free(pt_cuSten->boundaryBottom);
}
// ---------------------------------------------------------------------
// End of file
// --------------------------------------------------------------------- |
0f7dd083bcc2a68c6b19711cc58d44be714384dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "my_graph_net.cuh"
#include "my_device_func.cuh"
#include <stdarg.h>
MY_GRAPH_NET :: MY_GRAPH_NET()
{
CURAND_CALL(hiprandCreateGenerator(&rand_gen,HIPRAND_RNG_PSEUDO_DEFAULT));
CUBLAS_CALL(hipblasCreate(&handle));
}
MY_GRAPH_NET :: ~MY_GRAPH_NET()
{
CUBLAS_CALL(hipblasDestroy(handle));
CURAND_CALL(hiprandDestroyGenerator(rand_gen));
}
void MY_GRAPH_NET :: foreward()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,FOREWARD);
cur = cur->next;
}
while(cur != NULL);
}
void MY_GRAPH_NET :: test()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,TEST);
cur = cur->next;
}
while(cur != NULL);
}
void MY_GRAPH_NET :: backward()
{
_node_graph_net *cur = deque_operate.tail;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,BACKWARD);
cur = cur->prev;
}
while(cur != NULL);
}
void MY_GRAPH_NET :: network_init(int seed)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
int max_row = 0;
int max_column = 0;
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(rand_gen,seed));
_node_graph_net *cur = deque_operate.tail;
do{
if(cur->in1 != NULL)
{
if(cur->in1->row > max_row) max_row = cur->in1->row;
if(cur->in1->column > max_column) max_column = cur->in1->column;
if(((cur->in1->row) > 0) && ((cur->in1->column) > 0) && (cur->in1->x) == NULL)
{
CUDA_CALL(hipMalloc(&(cur->in1->grad_x),sizeof(float)*(cur->in1->row)*(cur->in1->column)));
CUDA_CALL(hipMalloc(&(cur->in1->x),sizeof(float)*(cur->in1->row)*(cur->in1->column)));
blocksPerGride = ((cur->in1->row)*(cur->in1->column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_ones), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, cur->in1->grad_x,(cur->in1->row)*(cur->in1->column));
}
}
if(cur->in2 != NULL)
{
if(cur->in2->row > max_row) max_row = cur->in2->row;
if(cur->in2->column > max_column) max_column = cur->in2->column;
if(((cur->in2->row) > 0) && ((cur->in2->column) > 0) && (cur->in2->x) == NULL )
{
CUDA_CALL(hipMalloc(&(cur->in2->grad_x),sizeof(float)*(cur->in2->row)*(cur->in2->column)));
CUDA_CALL(hipMalloc(&(cur->in2->x),sizeof(float)*(cur->in2->row)*(cur->in2->column)));
blocksPerGride = ((cur->in2->row)*(cur->in2->column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_ones), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, cur->in2->grad_x,(cur->in2->row)*(cur->in2->column));
}
}
if(cur->out != NULL)
{
if(cur->out->row > max_row) max_row = cur->out->row;
if(cur->out->column > max_column) max_column = cur->out->column;
if(((cur->out->row) > 0) && ((cur->out->column) > 0) && (cur->out->x) == NULL )
{
CUDA_CALL(hipMalloc(&(cur->out->grad_x),sizeof(float)*(cur->out->row)*(cur->out->column)));
CUDA_CALL(hipMalloc(&(cur->out->x),sizeof(float)*(cur->out->row)*(cur->out->column)));
blocksPerGride = ((cur->out->row)*(cur->out->column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_ones), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, cur->out->grad_x,(cur->out->row)*(cur->out->column));
}
}
cur = cur->prev;
}
while(cur != NULL);
CUDA_CALL(hipMalloc(&(d_ones.x),sizeof(float)*max_row*max_column));
d_ones.row = max_row;
d_ones.column = max_column;
blocksPerGride = ((d_ones.row)*(d_ones.column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_ones), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, d_ones.x,(d_ones.row)*(d_ones.column));
CUDA_CALL(hipMalloc(&(d_temp.x),sizeof(float)*max_row*max_column));
d_temp.row = max_row;
d_temp.column = max_column;
}
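// A minimal end-to-end sketch of how this graph class is typically driven (hypothetical
// layer sizes; x, t, W1 and b1 are user-owned matrices whose row/column fields must be
// set and whose device data must be filled in before training):
//
//   MY_GRAPH_NET net;
//   MY_MATRIX_DEVICE x, t, W1, b1;
//   MY_MATRIX_DEVICE *h = net.relu(net.add_bias(net.multi(&W1, &x), &b1));
//   MY_MATRIX_DEVICE *loss = net.least_squares(h, &t);
//   net.network_init(1234); // allocates x/grad_x buffers and seeds the cuRAND generator
//
//   MY_MOMENTUM_OPTIMIZER opt;
//   opt.set_hyperpara(0.01, 0.9);
//   opt.set_para(&W1, &b1, NULL); // NULL-terminated parameter list
//   net.foreward();
//   net.backward();
//   opt.update();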
void gate_multi(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
float const one = 1.0;
float const zero = 0.0;
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
CUBLAS_CALL(hipblasSgemm(((MY_GRAPH_NET*)pthis)->handle, HIPBLAS_OP_N, HIPBLAS_OP_N, pc->row, pc->column, pb->row,
&one, pa->x, pa->row, pb->x, pb->row, &zero, pc->x, pc->row));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transpose), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, ((MY_GRAPH_NET*)pthis)->d_temp.x, pa->x,
pa->row, pa->column, (pa->row)*(pa->column));
CUBLAS_CALL(hipblasSgemm(((MY_GRAPH_NET*)pthis)->handle, HIPBLAS_OP_N, HIPBLAS_OP_N, pb->row, pb->column, pc->row,
&one, ((MY_GRAPH_NET*)pthis)->d_temp.x, pb->row, pc->grad_x, pc->row, &zero, pb->grad_x, pb->row));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transpose), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, ((MY_GRAPH_NET*)pthis)->d_temp.x, pb->x,
pb->row, pb->column, (pb->row)*(pb->column));
CUBLAS_CALL(hipblasSgemm(((MY_GRAPH_NET*)pthis)->handle, HIPBLAS_OP_N, HIPBLAS_OP_N, pa->row, pa->column, pc->column,
&one, pc->grad_x, pc->row, ((MY_GRAPH_NET*)pthis)->d_temp.x, pc->column, &zero, pa->grad_x, pa->row));
}
}
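// The BACKWARD branch of gate_multi above is the standard matrix-product gradient rule:
// for C = A * B it forms grad_B = A^T * grad_C and grad_A = grad_C * B^T, materialising
// each transpose in d_temp before the cuBLAS call. As a shape check (illustrative sizes):
// if A is (m x k) and B is (k x n), then grad_C is (m x n), A^T * grad_C is (k x n) like B,
// and grad_C * B^T is (m x k) like A.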
MY_MATRIX_DEVICE* MY_GRAPH_NET :: multi(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR(pa->column == pb->row);
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pb->column;
deque_operate.AddLast(pa,pb,pc,&gate_multi);
return pc;
}
void gate_bias_add(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
float const one = 1.0;
float const zero = 0.0;
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( add_bias), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,pb->x,pc->x,pa->row,(pc->row)*(pc->column));
}
else if(stat ==BACKWARD)
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,(pc->row)*(pc->column));
CUBLAS_CALL(hipblasSgemm(((MY_GRAPH_NET*)pthis)->handle, HIPBLAS_OP_N, HIPBLAS_OP_N, pb->row, pb->column, pc->column,
&one, pc->grad_x, pc->row, ((MY_GRAPH_NET*)pthis)->d_ones.x, pc->column, &zero, pb->grad_x, pb->row));
}
}
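// The BACKWARD branch of gate_bias_add above reduces grad_C over its columns by
// multiplying with the preallocated vector of ones (d_ones): grad_b = grad_C * 1,
// where 1 has shape (columns x 1). This is the usual trick for summing a bias
// gradient over the batch dimension with a single GEMM instead of a custom reduction.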
MY_MATRIX_DEVICE* MY_GRAPH_NET :: add_bias(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR((pa->row == pb->row)&&(pb->column == 1));
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_bias_add);
return pc;
}
void gate_relu(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( relu), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( relu_inv), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( scalar_multi), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: relu(MY_MATRIX_DEVICE *pa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_relu);
return pc;
}
void gate_elu(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( elu), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( elu_inv), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( scalar_multi), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: elu(MY_MATRIX_DEVICE *pa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_elu);
return pc;
}
void gate_binary_cross_entropy(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD))
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( binary_cross_entropy), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,pb->x,pc->x,(pc->row)*(pc->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( binary_cross_entropy_inv), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,pb->x,pa->grad_x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( scalar_multi), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: binary_cross_entropy(MY_MATRIX_DEVICE *pa,MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR((pa->row == pb->row)&&(pa->column == pb->column));
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_binary_cross_entropy);
return pc;
}
void gate_least_squares(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD))
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( least_squares), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,pb->x,pc->x,(pc->row)*(pc->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( least_squares_inv), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,pb->x,pa->grad_x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( scalar_multi), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: least_squares(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR((pa->row == pb->row)&&(pa->column == pb->column));
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_least_squares);
return pc;
}
void gate_inverted_dropout(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float const dropout_rate = pb->rate;
if(stat == FOREWARD)
{
CURAND_CALL(hiprandGenerateUniform(((MY_GRAPH_NET*)pthis)->rand_gen,pb->x,(pb->row)*(pb->column)));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( dropout_table), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pb->x,dropout_rate,(pb->row)*(pb->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( dropout), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,pb->x,pc->x,dropout_rate,(pc->row)*(pc->column));
}
else if(stat == TEST)
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,pc->x,(pc->row)*(pc->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( dropout), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->grad_x,pb->x,pa->grad_x,dropout_rate,(pc->row)*(pc->column));
}
}
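// Note on gate_inverted_dropout above: in "inverted" dropout the random mask and the
// compensating rescale are both applied in the FOREWARD branch, which is why the TEST
// branch can simply copy the activations through unchanged. The precise meaning of
// `rate` (keep versus drop probability) is decided by the dropout_table and dropout
// kernels in my_device_func.cuh, which are not shown in this file.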
MY_MATRIX_DEVICE* MY_GRAPH_NET :: inverted_dropout(MY_MATRIX_DEVICE *pa, float rate)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
MY_MATRIX_DEVICE *pb = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pb);
pc->row = pa->row;
pc->column = pa->column;
pb->rate = rate;
pb->row = pa->row;
pb->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_inverted_dropout);
return pc;
}
void gate_sigmoid(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( sigmoid), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( sigmoid_inv), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( scalar_multi), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: sigmoid(MY_MATRIX_DEVICE *pa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_sigmoid);
return pc;
}
void gate_tanh(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( tanh), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( tanh_inv), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( scalar_multi), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: tanh(MY_MATRIX_DEVICE *pa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_tanh);
return pc;
}
void gate_adding_point(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( add), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,pb->x,pc->x,(pc->row)*(pc->column));
}
else if(stat ==BACKWARD)
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,(pc->row)*(pc->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pb->grad_x,pc->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: adding_point(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR(((pa->column == pb->column) && (pa->row == pb->row)));
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_adding_point);
return pc;
}
void gate_dividing_point(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pb->x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat ==BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( add), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pb->grad_x,pc->grad_x,pa->grad_x,(pa->row)*(pa->column));
}
}
void MY_GRAPH_NET :: dividing_point(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE **pb, MY_MATRIX_DEVICE **pc)
{
*pb = new MY_MATRIX_DEVICE;
*pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(*pb);
deque_matrix.AddLast(*pc);
(*pb)->row = pa->row;
(*pb)->column = pa->column;
(*pc)->row = pa->row;
(*pc)->column = pa->column;
deque_operate.AddLast(pa,*pb,*pc,&gate_dividing_point);
}
void gate_stack(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, (pc->x)+((pa->row)*(pa->column)),pb->x,(pb->row)*(pb->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,(pa->row)*(pa->column));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( transfer), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pb->grad_x,(pc->grad_x)+((pa->row)*(pa->column)),
(pb->row)*(pb->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: stack(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR(pa->row == pb->row);
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column + pb->column;
deque_operate.AddLast(pa,pb,pc,&gate_stack);
return pc;
}
void gate_merge(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( merge), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pc->row,
pa->x,pa->row,0,(pa->row)*(pa->column));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( merge), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pc->row,
pb->x,pb->row, pa->row,(pb->row)*(pb->column));
}
else if(stat == BACKWARD)
{
printf("sdfssdf\n");
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( inv_merge), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pa->row,
pc->grad_x,pc->row,0,(pa->row)*(pa->column));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( inv_merge), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pb->grad_x,pb->row,
pc->grad_x,pc->row,pa->row, (pb->row)*(pb->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: merge(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR(pa->column == pb->column);
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row + pb->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_merge);
return pc;
}
void gate_rand_scale(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
float from = pa->rate;
float to = pc->rate;
float temp = (rand()%(int)((to - from)*1000))/1000.0 + from;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( multi_scala), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pa->x,temp,(pa->row)*(pa->column));
pa->rate = temp;
}
else if(stat == BACKWARD)
{
float temp = pa->rate;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( multi_scala), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,temp,(pa->row)*(pa->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: rand_scale(MY_MATRIX_DEVICE *pa, float from, float to)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pa->rate = from;
pc->rate = to;
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_rand_scale);
return pc;
}
void gate_scale(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float scale = pa->rate;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( multi_scala), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pa->x,scale,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( multi_scala), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,scale,(pa->row)*(pa->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: scale(MY_MATRIX_DEVICE *pa, float fa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
pa->rate = fa;
deque_operate.AddLast(pa,NULL,pc,&gate_scale);
return pc;
}
void gate_uniform_noise(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
if((stat == FOREWARD) || (stat == TEST))
{
CURAND_CALL(hiprandGenerateUniform(((MY_GRAPH_NET*)pthis)->rand_gen,pc->x,(pc->row)*(pc->column)));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: uniform_noise(int row, int column)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = row;
pc->column = column;
deque_operate.AddLast(NULL,NULL,pc,&gate_uniform_noise);
return pc;
}
void gate_white_noise(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
CURAND_CALL(hiprandGenerateUniform(((MY_GRAPH_NET*)pthis)->rand_gen,pc->x,(pc->row)*(pc->column)));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( dropout_table), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pc->rate,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: white_noise(int row, int column,float rate)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = row;
pc->column = column;
pc->rate = rate;
deque_operate.AddLast(NULL,NULL,pc,&gate_white_noise);
return pc;
}
void gate_min(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( min), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,pa->x,pc->rate,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( min_inv), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pa->x,pc->rate,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( scalar_multi), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: min(MY_MATRIX_DEVICE *pa, float max_value)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
pc->rate = max_value;
deque_operate.AddLast(pa,NULL,pc,&gate_min);
return pc;
}
float MY_GRAPH_NET :: sum_absolute(MY_MATRIX_DEVICE *pa)
{
float result;
CUBLAS_CALL(hipblasSasum(handle,(pa->row)*(pa->column),pa->x,1,&result));
return result;
}
float MY_GRAPH_NET :: average_absolute(MY_MATRIX_DEVICE *pa)
{
float result;
CUBLAS_CALL(hipblasSasum(handle,(pa->row)*(pa->column),pa->x,1,&result));
return result/((pa->row)*(pa->column));
}
float MY_GRAPH_NET :: accuracy(MY_MATRIX_DEVICE *y, MY_MATRIX_DEVICE *t)
{
MY_FUNC_ERROR((y->row == t->row)&&(y->column == t->column));
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float result;
blocksPerGride = (y->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( accuracy_table), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, y->x,t->x,d_temp.x,y->row, y->column);
CUBLAS_CALL(hipblasSasum(handle,y->column,d_temp.x,1,&result));
return result/y->column;
}
//-----------------------------
MY_MOMENTUM_OPTIMIZER :: MY_MOMENTUM_OPTIMIZER()
{
learning_rate = 0.1;
momentum_rate = 0.0;
}
MY_MOMENTUM_OPTIMIZER :: ~MY_MOMENTUM_OPTIMIZER()
{
}
void MY_MOMENTUM_OPTIMIZER :: set_hyperpara(float l_rate, float m_rate)
{
learning_rate = l_rate;
momentum_rate = m_rate;
}
void gate_momentum_update(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float l_rate = ((MY_MOMENTUM_OPTIMIZER*)pthis)->learning_rate;
float m_rate = ((MY_MOMENTUM_OPTIMIZER*)pthis)->momentum_rate;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( momentum_vector), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,l_rate,m_rate,(pa->row)*(pa->column));
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( add), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,pc->grad_x,pa->x,(pa->row)*(pa->column));
}
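// gate_momentum_update above keeps a persistent velocity in pc->grad_x: momentum_vector
// (defined in my_device_func.cuh, not shown here) folds the fresh gradient into it, and
// the add kernel then moves the parameter by that velocity, p = p + v. In the usual
// formulation this is v = m_rate * v - l_rate * grad, which reduces to plain SGD when
// m_rate = 0.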
void MY_MOMENTUM_OPTIMIZER :: set_para(MY_MATRIX_DEVICE *pa, ...)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
CUDA_CALL(hipMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column)));
blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_zeros), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->grad_x,(pc->row)*(pc->column));
deque_operate.AddLast(pa,NULL,pc,&gate_momentum_update);
va_list ap;
MY_MATRIX_DEVICE *arg;
va_start(ap,pa);
while(1){
arg=va_arg(ap,MY_MATRIX_DEVICE*);
if (arg == NULL) break;
pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = arg->row;
pc->column = arg->column;
CUDA_CALL(hipMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column)));
blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_zeros), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->grad_x,(pc->row)*(pc->column));
deque_operate.AddLast(arg,NULL,pc,&gate_momentum_update);
}
va_end(ap);
}
void MY_MOMENTUM_OPTIMIZER :: update()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,BACKWARD);
cur = cur->next;
}
while(cur != NULL);
}
//----------------------------------------------
MY_ADAM_OPTIMIZER :: MY_ADAM_OPTIMIZER()
{
learning_rate = 0.0001;
beta1 = 0.9;
beta2 = 0.999;
beta1_t = beta1;
beta2_t = beta2;
}
MY_ADAM_OPTIMIZER :: ~MY_ADAM_OPTIMIZER()
{
}
void MY_ADAM_OPTIMIZER :: set_hyperpara(float l_rate,float beta1_rate, float beta2_rate)
{
learning_rate = l_rate;
beta1 = beta1_rate;
beta2 = beta2_rate;
beta1_t = beta1;
beta2_t = beta2;
}
void gate_adam_update(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float learning_rate = ((MY_ADAM_OPTIMIZER*)pthis)->learning_rate;
float beta1 = ((MY_ADAM_OPTIMIZER*)pthis)->beta1;
float beta2 = ((MY_ADAM_OPTIMIZER*)pthis)->beta2;
float beta1_t = ((MY_ADAM_OPTIMIZER*)pthis)->beta1_t;
float beta2_t = ((MY_ADAM_OPTIMIZER*)pthis)->beta2_t;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( adam_beta1), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pb->grad_x,beta1,(pa->row)*(pa->column));
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( adam_beta2), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->grad_x,pc->grad_x,beta2,(pa->row)*(pa->column));
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( adam_sum), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pb->grad_x,pc->grad_x,pa->x,learning_rate,
beta1_t,beta2_t,(pa->row)*(pa->column));
}
void MY_ADAM_OPTIMIZER :: set_para(MY_MATRIX_DEVICE *pa, ...)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
MY_MATRIX_DEVICE *pb = new MY_MATRIX_DEVICE;
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pb);
deque_matrix.AddLast(pc);
pb->row = pa->row;
pb->column = pa->column;
CUDA_CALL(hipMalloc(&(pb->grad_x),sizeof(float)*(pb->row)*(pb->column)));
blocksPerGride = ((pb->row)*(pb->column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_zeros), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pb->grad_x,(pb->row)*(pb->column));
pc->row = pa->row;
pc->column = pa->column;
CUDA_CALL(hipMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column)));
blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_zeros), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->grad_x,(pc->row)*(pc->column));
deque_operate.AddLast(pa,pb,pc,&gate_adam_update);
va_list ap;
MY_MATRIX_DEVICE *arg;
va_start(ap,pa);
while(1){
arg=va_arg(ap,MY_MATRIX_DEVICE*);
if (arg == NULL) break;
pb = new MY_MATRIX_DEVICE;
pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pb);
deque_matrix.AddLast(pc);
pb->row = arg->row;
pb->column = arg->column;
CUDA_CALL(hipMalloc(&(pb->grad_x),sizeof(float)*(pb->row)*(pb->column)));
blocksPerGride = ((pb->row)*(pb->column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_zeros), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pb->grad_x,(pb->row)*(pb->column));
pc->row = arg->row;
pc->column = arg->column;
CUDA_CALL(hipMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column)));
blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( make_zeros), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->grad_x,(pc->row)*(pc->column));
deque_operate.AddLast(arg,pb,pc,&gate_adam_update);
}
va_end(ap);
}
void MY_ADAM_OPTIMIZER :: update()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,BACKWARD);
cur = cur->next;
}
while(cur != NULL);
beta1_t = beta1_t * beta1;
beta2_t = beta2_t * beta2;
}
//-------------------------------------------------
MY_REGULARIZATION :: MY_REGULARIZATION()
{
L1_rate = 1e-8;
L2_rate = 1e-8;
max_rate = 2.0;
}
MY_REGULARIZATION :: ~MY_REGULARIZATION()
{
}
void gate_max_norm(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float rate = ((MY_REGULARIZATION*)pthis)->max_rate;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
hipLaunchKernelGGL(( max_norm), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pa->x,rate,(pa->row)*(pa->column));
}
void MY_REGULARIZATION :: set_para(REGULARIZATION_STAT stat, float rate, ...)
{
va_list ap;
MY_MATRIX_DEVICE *arg;
va_start(ap,rate);
while(1){
arg=va_arg(ap,MY_MATRIX_DEVICE*);
if (arg == NULL) break;
if(stat == MAX_NORM){
max_rate = rate;
deque_operate.AddLast(arg,NULL,NULL,&gate_max_norm);
}
else{
printf("L1/L2 norm regularization is not implemented");
exit(1);
}
}
va_end(ap);
}
void MY_REGULARIZATION :: update()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,BACKWARD);
cur = cur->next;
}
while(cur != NULL);
}
| 0f7dd083bcc2a68c6b19711cc58d44be714384dd.cu | #include "my_graph_net.cuh"
#include "my_device_func.cuh"
#include <stdarg.h>
MY_GRAPH_NET :: MY_GRAPH_NET()
{
CURAND_CALL(curandCreateGenerator(&rand_gen,CURAND_RNG_PSEUDO_DEFAULT));
CUBLAS_CALL(cublasCreate(&handle));
}
MY_GRAPH_NET :: ~MY_GRAPH_NET()
{
CUBLAS_CALL(cublasDestroy(handle));
CURAND_CALL(curandDestroyGenerator(rand_gen));
}
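// The computation graph is stored as a doubly linked deque of gate nodes.
// foreward()/test() walk it head-to-tail, calling each gate with FOREWARD/TEST;
// backward() walks it tail-to-head with BACKWARD to propagate gradients.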
void MY_GRAPH_NET :: foreward()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,FOREWARD);
cur = cur->next;
}
while(cur != NULL);
}
void MY_GRAPH_NET :: test()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,TEST);
cur = cur->next;
}
while(cur != NULL);
}
void MY_GRAPH_NET :: backward()
{
_node_graph_net *cur = deque_operate.tail;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,BACKWARD);
cur = cur->prev;
}
while(cur != NULL);
}
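// Allocates the device buffers (x and grad_x) of every matrix referenced by the
// graph, initialises the gradients to ones, seeds the cuRAND generator, and
// sizes the shared scratch buffers d_ones / d_temp to the largest matrix seen.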
void MY_GRAPH_NET :: network_init(int seed)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
int max_row = 0;
int max_column = 0;
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(rand_gen,seed));
_node_graph_net *cur = deque_operate.tail;
do{
if(cur->in1 != NULL)
{
if(cur->in1->row > max_row) max_row = cur->in1->row;
if(cur->in1->column > max_column) max_column = cur->in1->column;
if(((cur->in1->row) > 0) && ((cur->in1->column) > 0) && (cur->in1->x) == NULL)
{
CUDA_CALL(cudaMalloc(&(cur->in1->grad_x),sizeof(float)*(cur->in1->row)*(cur->in1->column)));
CUDA_CALL(cudaMalloc(&(cur->in1->x),sizeof(float)*(cur->in1->row)*(cur->in1->column)));
blocksPerGride = ((cur->in1->row)*(cur->in1->column) + threadsPerBolck -1)/threadsPerBolck;
make_ones<<<blocksPerGride, threadsPerBolck>>>(cur->in1->grad_x,(cur->in1->row)*(cur->in1->column));
}
}
if(cur->in2 != NULL)
{
if(cur->in2->row > max_row) max_row = cur->in2->row;
if(cur->in2->column > max_column) max_column = cur->in2->column;
if(((cur->in2->row) > 0) && ((cur->in2->column) > 0) && (cur->in2->x) == NULL )
{
CUDA_CALL(cudaMalloc(&(cur->in2->grad_x),sizeof(float)*(cur->in2->row)*(cur->in2->column)));
CUDA_CALL(cudaMalloc(&(cur->in2->x),sizeof(float)*(cur->in2->row)*(cur->in2->column)));
blocksPerGride = ((cur->in2->row)*(cur->in2->column) + threadsPerBolck -1)/threadsPerBolck;
make_ones<<<blocksPerGride, threadsPerBolck>>>(cur->in2->grad_x,(cur->in2->row)*(cur->in2->column));
}
}
if(cur->out != NULL)
{
if(cur->out->row > max_row) max_row = cur->out->row;
if(cur->out->column > max_column) max_column = cur->out->column;
if(((cur->out->row) > 0) && ((cur->out->column) > 0) && (cur->out->x) == NULL )
{
CUDA_CALL(cudaMalloc(&(cur->out->grad_x),sizeof(float)*(cur->out->row)*(cur->out->column)));
CUDA_CALL(cudaMalloc(&(cur->out->x),sizeof(float)*(cur->out->row)*(cur->out->column)));
blocksPerGride = ((cur->out->row)*(cur->out->column) + threadsPerBolck -1)/threadsPerBolck;
make_ones<<<blocksPerGride, threadsPerBolck>>>(cur->out->grad_x,(cur->out->row)*(cur->out->column));
}
}
cur = cur->prev;
}
while(cur != NULL);
CUDA_CALL(cudaMalloc(&(d_ones.x),sizeof(float)*max_row*max_column));
d_ones.row = max_row;
d_ones.column = max_column;
blocksPerGride = ((d_ones.row)*(d_ones.column) + threadsPerBolck -1)/threadsPerBolck;
make_ones<<<blocksPerGride, threadsPerBolck>>>(d_ones.x,(d_ones.row)*(d_ones.column));
CUDA_CALL(cudaMalloc(&(d_temp.x),sizeof(float)*max_row*max_column));
d_temp.row = max_row;
d_temp.column = max_column;
}
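// Matrix-product gate: forward computes C = A*B with cublasSgemm (column-major).
// Backward materialises the needed transposes in d_temp and forms
// grad_B = A^T * grad_C and grad_A = grad_C * B^T.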
void gate_multi(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
float const one = 1.0;
float const zero = 0.0;
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
CUBLAS_CALL(cublasSgemm(((MY_GRAPH_NET*)pthis)->handle, CUBLAS_OP_N, CUBLAS_OP_N, pc->row, pc->column, pb->row,
&one, pa->x, pa->row, pb->x, pb->row, &zero, pc->x, pc->row));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
transpose<<<blocksPerGride, threadsPerBolck>>>(((MY_GRAPH_NET*)pthis)->d_temp.x, pa->x,
pa->row, pa->column, (pa->row)*(pa->column));
CUBLAS_CALL(cublasSgemm(((MY_GRAPH_NET*)pthis)->handle, CUBLAS_OP_N, CUBLAS_OP_N, pb->row, pb->column, pc->row,
&one, ((MY_GRAPH_NET*)pthis)->d_temp.x, pb->row, pc->grad_x, pc->row, &zero, pb->grad_x, pb->row));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
transpose<<<blocksPerGride, threadsPerBolck>>>(((MY_GRAPH_NET*)pthis)->d_temp.x, pb->x,
pb->row, pb->column, (pb->row)*(pb->column));
CUBLAS_CALL(cublasSgemm(((MY_GRAPH_NET*)pthis)->handle, CUBLAS_OP_N, CUBLAS_OP_N, pa->row, pa->column, pc->column,
&one, pc->grad_x, pc->row, ((MY_GRAPH_NET*)pthis)->d_temp.x, pc->column, &zero, pa->grad_x, pa->row));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: multi(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR(pa->column == pb->row);
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pb->column;
deque_operate.AddLast(pa,pb,pc,&gate_multi);
return pc;
}
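// Bias gate: forward adds the column vector b to every column of A.
// Backward copies grad_C straight through to grad_A and reduces it over the
// columns (a GEMM against the all-ones vector) to obtain grad_b.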
void gate_bias_add(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
float const one = 1.0;
float const zero = 0.0;
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
add_bias<<<blocksPerGride, threadsPerBolck>>>(pa->x,pb->x,pc->x,pa->row,(pc->row)*(pc->column));
}
else if(stat ==BACKWARD)
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,(pc->row)*(pc->column));
CUBLAS_CALL(cublasSgemm(((MY_GRAPH_NET*)pthis)->handle, CUBLAS_OP_N, CUBLAS_OP_N, pb->row, pb->column, pc->column,
&one, pc->grad_x, pc->row, ((MY_GRAPH_NET*)pthis)->d_ones.x, pc->column, &zero, pb->grad_x, pb->row));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: add_bias(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR((pa->row == pb->row)&&(pb->column == 1));
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_bias_add);
return pc;
}
void gate_relu(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
relu<<<blocksPerGride, threadsPerBolck>>>(pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
relu_inv<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
scalar_multi<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: relu(MY_MATRIX_DEVICE *pa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_relu);
return pc;
}
void gate_elu(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
elu<<<blocksPerGride, threadsPerBolck>>>(pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
elu_inv<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
scalar_multi<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: elu(MY_MATRIX_DEVICE *pa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_elu);
return pc;
}
void gate_binary_cross_entropy(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD))
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
binary_cross_entropy<<<blocksPerGride, threadsPerBolck>>>(pa->x,pb->x,pc->x,(pc->row)*(pc->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
binary_cross_entropy_inv<<<blocksPerGride, threadsPerBolck>>>(pa->x,pb->x,pa->grad_x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
scalar_multi<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: binary_cross_entropy(MY_MATRIX_DEVICE *pa,MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR((pa->row == pb->row)&&(pa->column == pb->column));
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_binary_cross_entropy);
return pc;
}
void gate_least_squares(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD))
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
least_squares<<<blocksPerGride, threadsPerBolck>>>(pa->x,pb->x,pc->x,(pc->row)*(pc->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
least_squares_inv<<<blocksPerGride, threadsPerBolck>>>(pa->x,pb->x,pa->grad_x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
scalar_multi<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: least_squares(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR((pa->row == pb->row)&&(pa->column == pb->column));
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_least_squares);
return pc;
}
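// Inverted dropout: at train time a uniform random mask is drawn with cuRAND and
// thresholded by the drop rate (dropout_table); the dropout kernel applies the
// mask with the train-time rescaling folded in, so the TEST path simply copies
// the input through unchanged.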
void gate_inverted_dropout(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float const dropout_rate = pb->rate;
if(stat == FOREWARD)
{
CURAND_CALL(curandGenerateUniform(((MY_GRAPH_NET*)pthis)->rand_gen,pb->x,(pb->row)*(pb->column)));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
dropout_table<<<blocksPerGride, threadsPerBolck>>>(pb->x,dropout_rate,(pb->row)*(pb->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
dropout<<<blocksPerGride, threadsPerBolck>>>(pa->x,pb->x,pc->x,dropout_rate,(pc->row)*(pc->column));
}
else if(stat == TEST)
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>(pa->x,pc->x,(pc->row)*(pc->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
dropout<<<blocksPerGride, threadsPerBolck>>>(pc->grad_x,pb->x,pa->grad_x,dropout_rate,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: inverted_dropout(MY_MATRIX_DEVICE *pa, float rate)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
MY_MATRIX_DEVICE *pb = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pb);
pc->row = pa->row;
pc->column = pa->column;
pb->rate = rate;
pb->row = pa->row;
pb->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_inverted_dropout);
return pc;
}
void gate_sigmoid(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
sigmoid<<<blocksPerGride, threadsPerBolck>>>(pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
sigmoid_inv<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
scalar_multi<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: sigmoid(MY_MATRIX_DEVICE *pa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_sigmoid);
return pc;
}
void gate_tanh(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
tanh<<<blocksPerGride, threadsPerBolck>>>(pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
tanh_inv<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
scalar_multi<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: tanh(MY_MATRIX_DEVICE *pa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_tanh);
return pc;
}
void gate_adding_point(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
add<<<blocksPerGride, threadsPerBolck>>>(pa->x,pb->x,pc->x,(pc->row)*(pc->column));
}
else if(stat ==BACKWARD)
{
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,(pc->row)*(pc->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>(pb->grad_x,pc->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: adding_point(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR(((pa->column == pb->column) && (pa->row == pb->row)));
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_adding_point);
return pc;
}
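// Fan-out gate: forward copies one node into two consumer nodes; backward sums
// the two incoming gradients back into the source node.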
void gate_dividing_point(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>(pb->x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>(pc->x,pa->x,(pa->row)*(pa->column));
}
else if(stat ==BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
add<<<blocksPerGride, threadsPerBolck>>>(pb->grad_x,pc->grad_x,pa->grad_x,(pa->row)*(pa->column));
}
}
void MY_GRAPH_NET :: dividing_point(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE **pb, MY_MATRIX_DEVICE **pc)
{
*pb = new MY_MATRIX_DEVICE;
*pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(*pb);
deque_matrix.AddLast(*pc);
(*pb)->row = pa->row;
(*pb)->column = pa->column;
(*pc)->row = pa->row;
(*pc)->column = pa->column;
deque_operate.AddLast(pa,*pb,*pc,&gate_dividing_point);
}
void gate_stack(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>(pc->x,pa->x,(pa->row)*(pa->column));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>((pc->x)+((pa->row)*(pa->column)),pb->x,(pb->row)*(pb->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,(pa->row)*(pa->column));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
transfer<<<blocksPerGride, threadsPerBolck>>>(pb->grad_x,(pc->grad_x)+((pa->row)*(pa->column)),
(pb->row)*(pb->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: stack(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR(pa->row == pb->row);
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column + pb->column;
deque_operate.AddLast(pa,pb,pc,&gate_stack);
return pc;
}
void gate_merge(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
merge<<<blocksPerGride, threadsPerBolck>>>(pc->x,pc->row,
pa->x,pa->row,0,(pa->row)*(pa->column));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
merge<<<blocksPerGride, threadsPerBolck>>>(pc->x,pc->row,
pb->x,pb->row, pa->row,(pb->row)*(pb->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
inv_merge<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pa->row,
pc->grad_x,pc->row,0,(pa->row)*(pa->column));
blocksPerGride = (pb->row*pb->column + threadsPerBolck -1)/threadsPerBolck;
inv_merge<<<blocksPerGride, threadsPerBolck>>>(pb->grad_x,pb->row,
pc->grad_x,pc->row,pa->row, (pb->row)*(pb->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: merge(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb)
{
MY_FUNC_ERROR(pa->column == pb->column);
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row + pb->row;
pc->column = pa->column;
deque_operate.AddLast(pa,pb,pc,&gate_merge);
return pc;
}
void gate_rand_scale(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
float from = pa->rate;
float to = pc->rate;
float temp = (rand()%(int)((to - from)*1000))/1000.0 + from;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
multi_scala<<<blocksPerGride, threadsPerBolck>>>(pc->x,pa->x,temp,(pa->row)*(pa->column));
pa->rate = temp;
}
else if(stat == BACKWARD)
{
float temp = pa->rate;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
multi_scala<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,temp,(pa->row)*(pa->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: rand_scale(MY_MATRIX_DEVICE *pa, float from, float to)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pa->rate = from;
pc->rate = to;
pc->row = pa->row;
pc->column = pa->column;
deque_operate.AddLast(pa,NULL,pc,&gate_rand_scale);
return pc;
}
void gate_scale(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float scale = pa->rate;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
multi_scala<<<blocksPerGride, threadsPerBolck>>>(pc->x,pa->x,scale,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
multi_scala<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,scale,(pa->row)*(pa->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: scale(MY_MATRIX_DEVICE *pa, float fa)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
pa->rate = fa;
deque_operate.AddLast(pa,NULL,pc,&gate_scale);
return pc;
}
void gate_uniform_noise(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
if((stat == FOREWARD) || (stat == TEST))
{
CURAND_CALL(curandGenerateUniform(((MY_GRAPH_NET*)pthis)->rand_gen,pc->x,(pc->row)*(pc->column)));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: uniform_noise(int row, int column)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = row;
pc->column = column;
deque_operate.AddLast(NULL,NULL,pc,&gate_uniform_noise);
return pc;
}
void gate_white_noise(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
CURAND_CALL(curandGenerateUniform(((MY_GRAPH_NET*)pthis)->rand_gen,pc->x,(pc->row)*(pc->column)));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
dropout_table<<<blocksPerGride, threadsPerBolck>>>(pc->x,pc->rate,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: white_noise(int row, int column,float rate)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = row;
pc->column = column;
pc->rate = rate;
deque_operate.AddLast(NULL,NULL,pc,&gate_white_noise);
return pc;
}
void gate_min(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
if((stat == FOREWARD) || (stat == TEST))
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
min<<<blocksPerGride, threadsPerBolck>>>(pc->x,pa->x,pc->rate,(pa->row)*(pa->column));
}
else if(stat == BACKWARD)
{
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
min_inv<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pa->x,pc->rate,(pa->row)*(pa->column));
blocksPerGride = (pc->row*pc->column + threadsPerBolck -1)/threadsPerBolck;
scalar_multi<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,pa->grad_x,(pc->row)*(pc->column));
}
}
MY_MATRIX_DEVICE* MY_GRAPH_NET :: min(MY_MATRIX_DEVICE *pa, float max_value)
{
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
pc->rate = max_value;
deque_operate.AddLast(pa,NULL,pc,&gate_min);
return pc;
}
float MY_GRAPH_NET :: sum_absolute(MY_MATRIX_DEVICE *pa)
{
float result;
CUBLAS_CALL(cublasSasum(handle,(pa->row)*(pa->column),pa->x,1,&result));
return result;
}
float MY_GRAPH_NET :: average_absolute(MY_MATRIX_DEVICE *pa)
{
float result;
CUBLAS_CALL(cublasSasum(handle,(pa->row)*(pa->column),pa->x,1,&result));
return result/((pa->row)*(pa->column));
}
float MY_GRAPH_NET :: accuracy(MY_MATRIX_DEVICE *y, MY_MATRIX_DEVICE *t)
{
MY_FUNC_ERROR((y->row == t->row)&&(y->column == t->column));
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float result;
blocksPerGride = (y->column + threadsPerBolck -1)/threadsPerBolck;
accuracy_table<<<blocksPerGride, threadsPerBolck>>>(y->x,t->x,d_temp.x,y->row, y->column);
CUBLAS_CALL(cublasSasum(handle,y->column,d_temp.x,1,&result));
return result/y->column;
}
//-----------------------------
MY_MOMENTUM_OPTIMIZER :: MY_MOMENTUM_OPTIMIZER()
{
learning_rate = 0.1;
momentum_rate = 0.0;
}
MY_MOMENTUM_OPTIMIZER :: ~MY_MOMENTUM_OPTIMIZER()
{
}
void MY_MOMENTUM_OPTIMIZER :: set_hyperpara(float l_rate, float m_rate)
{
learning_rate = l_rate;
momentum_rate = m_rate;
}
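// Momentum update gate: pc->grad_x holds the persistent velocity for parameter
// pa; momentum_vector refreshes it from pa->grad_x using learning_rate and
// momentum_rate, and the velocity is then added onto the weights.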
void gate_momentum_update(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float l_rate = ((MY_MOMENTUM_OPTIMIZER*)pthis)->learning_rate;
float m_rate = ((MY_MOMENTUM_OPTIMIZER*)pthis)->momentum_rate;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
momentum_vector<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,l_rate,m_rate,(pa->row)*(pa->column));
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
add<<<blocksPerGride, threadsPerBolck>>>(pa->x,pc->grad_x,pa->x,(pa->row)*(pa->column));
}
void MY_MOMENTUM_OPTIMIZER :: set_para(MY_MATRIX_DEVICE *pa, ...)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = pa->row;
pc->column = pa->column;
CUDA_CALL(cudaMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column)));
blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck;
make_zeros<<<blocksPerGride, threadsPerBolck>>>(pc->grad_x,(pc->row)*(pc->column));
deque_operate.AddLast(pa,NULL,pc,&gate_momentum_update);
va_list ap;
MY_MATRIX_DEVICE *arg;
va_start(ap,pa);
while(1){
arg=va_arg(ap,MY_MATRIX_DEVICE*);
if (arg == NULL) break;
pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pc);
pc->row = arg->row;
pc->column = arg->column;
CUDA_CALL(cudaMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column)));
blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck;
make_zeros<<<blocksPerGride, threadsPerBolck>>>(pc->grad_x,(pc->row)*(pc->column));
deque_operate.AddLast(arg,NULL,pc,&gate_momentum_update);
}
va_end(ap);
}
void MY_MOMENTUM_OPTIMIZER :: update()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,BACKWARD);
cur = cur->next;
}
while(cur != NULL);
}
//----------------------------------------------
MY_ADAM_OPTIMIZER :: MY_ADAM_OPTIMIZER()
{
learning_rate = 0.0001;
beta1 = 0.9;
beta2 = 0.999;
beta1_t = beta1;
beta2_t = beta2;
}
MY_ADAM_OPTIMIZER :: ~MY_ADAM_OPTIMIZER()
{
}
void MY_ADAM_OPTIMIZER :: set_hyperpara(float l_rate,float beta1_rate, float beta2_rate)
{
learning_rate = l_rate;
beta1 = beta1_rate;
beta2 = beta2_rate;
beta1_t = beta1;
beta2_t = beta2;
}
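// Adam update gate: pb->grad_x and pc->grad_x hold the running first and second
// moment estimates for parameter pa. adam_beta1/adam_beta2 update them and
// adam_sum applies the bias-corrected step, with beta1_t/beta2_t (the running
// powers of beta1 and beta2, advanced once per update()) providing the correction.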
void gate_adam_update(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb,
MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float learning_rate = ((MY_ADAM_OPTIMIZER*)pthis)->learning_rate;
float beta1 = ((MY_ADAM_OPTIMIZER*)pthis)->beta1;
float beta2 = ((MY_ADAM_OPTIMIZER*)pthis)->beta2;
float beta1_t = ((MY_ADAM_OPTIMIZER*)pthis)->beta1_t;
float beta2_t = ((MY_ADAM_OPTIMIZER*)pthis)->beta2_t;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
adam_beta1<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pb->grad_x,beta1,(pa->row)*(pa->column));
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
adam_beta2<<<blocksPerGride, threadsPerBolck>>>(pa->grad_x,pc->grad_x,beta2,(pa->row)*(pa->column));
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
adam_sum<<<blocksPerGride, threadsPerBolck>>>(pb->grad_x,pc->grad_x,pa->x,learning_rate,
beta1_t,beta2_t,(pa->row)*(pa->column));
}
void MY_ADAM_OPTIMIZER :: set_para(MY_MATRIX_DEVICE *pa, ...)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
MY_MATRIX_DEVICE *pb = new MY_MATRIX_DEVICE;
MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pb);
deque_matrix.AddLast(pc);
pb->row = pa->row;
pb->column = pa->column;
CUDA_CALL(cudaMalloc(&(pb->grad_x),sizeof(float)*(pb->row)*(pb->column)));
blocksPerGride = ((pb->row)*(pb->column) + threadsPerBolck -1)/threadsPerBolck;
make_zeros<<<blocksPerGride, threadsPerBolck>>>(pb->grad_x,(pb->row)*(pb->column));
pc->row = pa->row;
pc->column = pa->column;
CUDA_CALL(cudaMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column)));
blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck;
make_zeros<<<blocksPerGride, threadsPerBolck>>>(pc->grad_x,(pc->row)*(pc->column));
deque_operate.AddLast(pa,pb,pc,&gate_adam_update);
va_list ap;
MY_MATRIX_DEVICE *arg;
va_start(ap,pa);
while(1){
arg=va_arg(ap,MY_MATRIX_DEVICE*);
if (arg == NULL) break;
pb = new MY_MATRIX_DEVICE;
pc = new MY_MATRIX_DEVICE;
deque_matrix.AddLast(pb);
deque_matrix.AddLast(pc);
pb->row = arg->row;
pb->column = arg->column;
CUDA_CALL(cudaMalloc(&(pb->grad_x),sizeof(float)*(pb->row)*(pb->column)));
blocksPerGride = ((pb->row)*(pb->column) + threadsPerBolck -1)/threadsPerBolck;
make_zeros<<<blocksPerGride, threadsPerBolck>>>(pb->grad_x,(pb->row)*(pb->column));
pc->row = arg->row;
pc->column = arg->column;
CUDA_CALL(cudaMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column)));
blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck;
make_zeros<<<blocksPerGride, threadsPerBolck>>>(pc->grad_x,(pc->row)*(pc->column));
deque_operate.AddLast(arg,pb,pc,&gate_adam_update);
}
va_end(ap);
}
void MY_ADAM_OPTIMIZER :: update()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,BACKWARD);
cur = cur->next;
}
while(cur != NULL);
beta1_t = beta1_t * beta1;
beta2_t = beta2_t * beta2;
}
//-------------------------------------------------
MY_REGULARIZATION :: MY_REGULARIZATION()
{
L1_rate = 1e-8;
L2_rate = 1e-8;
max_rate = 2.0;
}
MY_REGULARIZATION :: ~MY_REGULARIZATION()
{
}
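// Max-norm regularization gate: max_norm clamps the weights in place according
// to the max_rate constraint; L1/L2 penalties are declared but not implemented
// (see set_para below).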
void gate_max_norm(void *pthis, MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, GATE_STAT stat)
{
int const threadsPerBolck = 1024;
int blocksPerGride = 0;
float rate = ((MY_REGULARIZATION*)pthis)->max_rate;
blocksPerGride = (pa->row*pa->column + threadsPerBolck -1)/threadsPerBolck;
max_norm<<<blocksPerGride, threadsPerBolck>>>(pa->x,rate,(pa->row)*(pa->column));
}
void MY_REGULARIZATION :: set_para(REGULARIZATION_STAT stat, float rate, ...)
{
va_list ap;
MY_MATRIX_DEVICE *arg;
va_start(ap,rate);
while(1){
arg=va_arg(ap,MY_MATRIX_DEVICE*);
if (arg == NULL) break;
if(stat == MAX_NORM){
max_rate = rate;
deque_operate.AddLast(arg,NULL,NULL,&gate_max_norm);
}
else{
printf("L1/L2 norm regularization is not implemented");
exit(1);
}
}
va_end(ap);
}
void MY_REGULARIZATION :: update()
{
_node_graph_net *cur = deque_operate.head;
do{
cur->operate(this,cur->in1,cur->in2,cur->out,BACKWARD);
cur = cur->next;
}
while(cur != NULL);
}
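/*
 * Illustrative usage sketch (matrix dimensions, device data and hyper-parameters
 * below are placeholders, not part of this file):
 *
 *   MY_GRAPH_NET net;
 *   MY_ADAM_OPTIMIZER opt;
 *   MY_MATRIX_DEVICE x, t, w, b;           // set row/column and device data first
 *   MY_MATRIX_DEVICE *h = net.multi(&w, &x);
 *   MY_MATRIX_DEVICE *z = net.add_bias(h, &b);
 *   MY_MATRIX_DEVICE *y = net.sigmoid(z);
 *   net.binary_cross_entropy(y, &t);
 *   net.network_init(1234);
 *   opt.set_para(&w, &b, NULL);             // NULL-terminated parameter list
 *   for (int epoch = 0; epoch < num_epochs; epoch++) {
 *       net.foreward();
 *       net.backward();
 *       opt.update();
 *   }
 */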
|
19bfab031c09bee6944a771379e12c9648dc10fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#ifdef _WIN64
#include <hipfft.h>
#define PI_FLOAT 3.14159265358979323846264338327f
#define OUTPUT
//#define OUTPUT_GF
#define OUTPUT_CHARGE
//#define OUTPUT_CHARGE_FFT
//#define OUTPUT_CHARGE_FFT_GF
//#define OUTPUT_POTENTIAL
void displayDeviceProperties(hipDeviceProp_t* pDeviceProp);
__global__ void createGreensFunc(hipfftReal* greensfunc, unsigned int Nx, unsigned int Ny, unsigned int Nz, float h) {
unsigned int tmp;
unsigned int coord[3];
for(int i = blockDim.x*blockIdx.x+threadIdx.x; i < Nz * Ny * (Nx/2+1); i += gridDim.x*blockDim.x) {
coord[0] = i % (Nx/2+1);
tmp = i / (Nx/2+1);
coord[1] = tmp % Ny;
coord[2] = tmp / Ny;
/* Setting 0th fourier mode to 0.0 enforces charge neutrality (effectively
adds homogeneous counter charge). This is necessary, since the equation
otherwise has no solution in periodic boundaries (an infinite amount of
charge would create an infinite potential). */
if(i == 0)
greensfunc[i] = 0.0f;
else
greensfunc[i] = -0.5f * h * h / (cos(2.0f*PI_FLOAT*coord[0]/(hipfftReal)Nx) +
cos(2.0f*PI_FLOAT*coord[1]/(hipfftReal)Ny) + cos(2.0f*PI_FLOAT*coord[2]/(hipfftReal)Nz) - 3.0f);
}
}
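/* In Fourier space the 7-point finite-difference Laplacian becomes a
   multiplicative factor, so each mode of the potential is obtained as
       phi_hat(k) = G(k) * rho_hat(k),  with
       G(k) = -h^2 / ( 2 * ( cos(2*pi*kx/Nx) + cos(2*pi*ky/Ny) + cos(2*pi*kz/Nz) - 3 ) ),
   which is exactly the factor stored in greensfunc above (up to the overall
   sign convention used by the rest of the program). */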
__global__ void multiplyGreensFunc(hipfftComplex* data, hipfftReal* greensfunc, unsigned int N) {
for(int i = blockDim.x*blockIdx.x+threadIdx.x; i < N; i += gridDim.x*blockDim.x) {
data[i].x *= greensfunc[i];
data[i].y *= greensfunc[i];
}
}
void displayDeviceProperties(hipDeviceProp_t* pDeviceProp)
{
if(!pDeviceProp)
return;
printf("\nDevice Name \t %s ", pDeviceProp->name);
printf("\n**************************************");
printf("\nTotal Global Memory\t\t -%d KB", (int) pDeviceProp->totalGlobalMem/1024);
printf("\nShared memory available per block \t %d KB", (int) pDeviceProp->sharedMemPerBlock/1024);
printf("\nNumber of registers per thread block \t %d", pDeviceProp->regsPerBlock);
printf("\nWarp size in threads \t %d", pDeviceProp->warpSize);
printf("\nMemory Pitch \t %d bytes", (int) pDeviceProp->memPitch);
printf("\nMaximum threads per block \t %d", pDeviceProp->maxThreadsPerBlock);
printf("\nMaximum Thread Dimension (block) \t %d %d %d", pDeviceProp->maxThreadsDim[0], pDeviceProp->maxThreadsDim[1], pDeviceProp->maxThreadsDim[2]);
printf("\nMaximum Thread Dimension (grid) \t %d %d %d", pDeviceProp->maxGridSize[0], pDeviceProp->maxGridSize[1], pDeviceProp->maxGridSize[2]);
printf("\nTotal constant memory \t %d bytes", (int) pDeviceProp->totalConstMem);
printf("\nCUDA ver \t %d.%d", pDeviceProp->major, pDeviceProp->minor);
printf("\nClock rate \t %d KHz", pDeviceProp->clockRate);
printf("\nTexture Alignment \t %d bytes", (int) pDeviceProp->textureAlignment);
printf("\nDevice Overlap \t %s", pDeviceProp-> deviceOverlap?"Allowed":"Not Allowed");
printf("\nNumber of Multi processors \t %d\n", pDeviceProp->multiProcessorCount);
}
#endif | 19bfab031c09bee6944a771379e12c9648dc10fa.cu | #include <stdio.h>
#include <math.h>
#ifdef _WIN64
#include <cufft.h>
#define PI_FLOAT 3.14159265358979323846264338327f
#define OUTPUT
//#define OUTPUT_GF
#define OUTPUT_CHARGE
//#define OUTPUT_CHARGE_FFT
//#define OUTPUT_CHARGE_FFT_GF
//#define OUTPUT_POTENTIAL
void displayDeviceProperties(cudaDeviceProp* pDeviceProp);
__global__ void createGreensFunc(cufftReal* greensfunc, unsigned int Nx, unsigned int Ny, unsigned int Nz, float h) {
unsigned int tmp;
unsigned int coord[3];
for(int i = blockDim.x*blockIdx.x+threadIdx.x; i < Nz * Ny * (Nx/2+1); i += gridDim.x*blockDim.x) {
coord[0] = i % (Nx/2+1);
tmp = i / (Nx/2+1);
coord[1] = tmp % Ny;
coord[2] = tmp / Ny;
/* Setting 0th fourier mode to 0.0 enforces charge neutrality (effectively
adds homogeneous counter charge). This is necessary, since the equation
otherwise has no solution in periodic boundaries (an infinite amount of
charge would create an infinite potential). */
if(i == 0)
greensfunc[i] = 0.0f;
else
greensfunc[i] = -0.5f * h * h / (cos(2.0f*PI_FLOAT*coord[0]/(cufftReal)Nx) +
cos(2.0f*PI_FLOAT*coord[1]/(cufftReal)Ny) + cos(2.0f*PI_FLOAT*coord[2]/(cufftReal)Nz) - 3.0f);
}
}
__global__ void multiplyGreensFunc(cufftComplex* data, cufftReal* greensfunc, unsigned int N) {
for(int i = blockDim.x*blockIdx.x+threadIdx.x; i < N; i += gridDim.x*blockDim.x) {
data[i].x *= greensfunc[i];
data[i].y *= greensfunc[i];
}
}
void displayDeviceProperties(cudaDeviceProp* pDeviceProp)
{
if(!pDeviceProp)
return;
printf("\nDevice Name \t – %s ", pDeviceProp->name);
printf("\n**************************************");
printf("\nTotal Global Memory\t\t -%d KB", (int) pDeviceProp->totalGlobalMem/1024);
printf("\nShared memory available per block \t – %d KB", (int) pDeviceProp->sharedMemPerBlock/1024);
printf("\nNumber of registers per thread block \t – %d", pDeviceProp->regsPerBlock);
printf("\nWarp size in threads \t – %d", pDeviceProp->warpSize);
printf("\nMemory Pitch \t – %d bytes", (int) pDeviceProp->memPitch);
printf("\nMaximum threads per block \t – %d", pDeviceProp->maxThreadsPerBlock);
printf("\nMaximum Thread Dimension (block) \t – %d %d %d", pDeviceProp->maxThreadsDim[0], pDeviceProp->maxThreadsDim[1], pDeviceProp->maxThreadsDim[2]);
printf("\nMaximum Thread Dimension (grid) \t – %d %d %d", pDeviceProp->maxGridSize[0], pDeviceProp->maxGridSize[1], pDeviceProp->maxGridSize[2]);
printf("\nTotal constant memory \t – %d bytes", (int) pDeviceProp->totalConstMem);
printf("\nCUDA ver \t – %d.%d", pDeviceProp->major, pDeviceProp->minor);
printf("\nClock rate \t – %d KHz", pDeviceProp->clockRate);
printf("\nTexture Alignment \t – %d bytes", (int) pDeviceProp->textureAlignment);
printf("\nDevice Overlap \t – %s", pDeviceProp-> deviceOverlap?"Allowed":"Not Allowed");
printf("\nNumber of Multi processors \t – %d\n", pDeviceProp->multiProcessorCount);
}
#endif |
890380c390d850fcddeca68b2b20e8e5e912aec5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <unistd.h>
double wctime()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec + 1E-6 * tv.tv_usec);
}
__global__ void saxpy_par(int n, float a, float *x, float *y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
y[i] = a * x[i] + y[i];
}
}
float saxpy_check(int n, float a, float *x, float *y, float *z) {
// a, x, y == original data for saxpy
// z = result found -- with which to compare.
float s=0.0, t = 0.0;
for (int i=0; i<n; i++) {
y[i] += a * x[i] ;
s += (y[i] - z[i])*(y[i] - z[i]);
t += z[i]*z[i];
}
if (t == 0.0) return(-1);
else
return(sqrt(s/t));
}
int main() {
//size of vectors
int n = 8388608; //8*1024*1024
size_t size = n * sizeof(float);
//allocate vectors on CPU
float *x , *y, *z;
x = (float *)malloc(size);
y = (float *)malloc(size);
z = (float *)malloc(size);
//allocate vectors on GPU
//hipMalloc( void** devPtr, size_t size )
//hipSuccess = 0
float *x_GPU, *y_GPU;
if (hipMalloc((void**) &x_GPU, size) != 0) {
return -1;
}
if (hipMalloc((void**) &y_GPU, size) != 0) {
return -1;
}
float a = 1.0;
int NITER = 100;
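//Dividing a by NITER makes the NITER kernel launches in the timing loop below
//add up to a single saxpy with a = 1.0, which is what saxpy_check verifies.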
a = a/(float)NITER;
//Initialize x and y with random numbers
for (int i = 0; i < n; i++) {
x[i] = (float)rand()/(float)rand();
y[i] = (float)rand()/(float)rand();
}
int vecLen;
for (vecLen = 2048; vecLen <= n; vecLen*=2) {
//copy vectors to GPU
hipMemcpy(x_GPU, x, vecLen * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y_GPU, y, vecLen * sizeof(float), hipMemcpyHostToDevice);
//set grid and block dimensions
dim3 dimGrid(vecLen/1024);
dim3 dimBlock(1024);
//call saxpy_par kernel NITER times
double t1 = wctime(); //start time
for (int iter = 0; iter < NITER; iter++) {
hipLaunchKernelGGL(( saxpy_par), dim3(dimGrid), dim3(dimBlock), 0, 0, vecLen, a, x_GPU, y_GPU);
}
hipDeviceSynchronize();
double t2 = wctime(); //end time
//Copy result to CPU so it can be passed to saxpy_check
hipMemcpy(z, y_GPU, vecLen * sizeof(float), hipMemcpyDeviceToHost);
//Check error
float error = saxpy_check(vecLen, 1.0, x, y, z);
//Get performance stats: each element costs one multiply and one add (2 flops),
//and the kernel sweeps vecLen elements NITER times.
float flops = (2 * vecLen * NITER)/(t2 - t1);
printf("** vecLen = %d, Mflops = %.2f, err = %.2e\n", vecLen, flops*1e-6, error);
}
free(x);
free(y);
free(z);
hipFree(x_GPU);
hipFree(y_GPU);
return 0;
}
| 890380c390d850fcddeca68b2b20e8e5e912aec5.cu | #include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <unistd.h>
double wctime()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec + 1E-6 * tv.tv_usec);
}
__global__ void saxpy_par(int n, float a, float *x, float *y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
y[i] = a * x[i] + y[i];
}
}
float saxpy_check(int n, float a, float *x, float *y, float *z) {
// a, x, y == original data for saxpy
// z = result found -- with which to compare.
float s=0.0, t = 0.0;
for (int i=0; i<n; i++) {
y[i] += a * x[i] ;
s += (y[i] - z[i])*(y[i] - z[i]);
t += z[i]*z[i];
}
if (t == 0.0) return(-1);
else
return(sqrt(s/t));
}
int main() {
//size of vectors
int n = 8388608; //8*1024*1024
size_t size = n * sizeof(float);
//allocate vectors on CPU
float *x , *y, *z;
x = (float *)malloc(size);
y = (float *)malloc(size);
z = (float *)malloc(size);
//allocate vectors on GPU
//cudaMalloc( void** devPtr, size_t size )
//cudaSuccess = 0
float *x_GPU, *y_GPU;
if (cudaMalloc((void**) &x_GPU, size) != 0) {
return -1;
}
if (cudaMalloc((void**) &y_GPU, size) != 0) {
return -1;
}
float a = 1.0;
int NITER = 100;
a = a/(float)NITER;
//Initialize x and y with random numbers
for (int i = 0; i < n; i++) {
x[i] = (float)rand()/(float)rand();
y[i] = (float)rand()/(float)rand();
}
int vecLen;
for (vecLen = 2048; vecLen <= n; vecLen*=2) {
//copy vectors to GPU
cudaMemcpy(x_GPU, x, vecLen * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y_GPU, y, vecLen * sizeof(float), cudaMemcpyHostToDevice);
//set grid and block dimensions
dim3 dimGrid(vecLen/1024);
dim3 dimBlock(1024);
//call saxpy_par kernel NITER times
double t1 = wctime(); //start time
for (int iter = 0; iter < NITER; iter++) {
saxpy_par<<<dimGrid, dimBlock>>>(vecLen, a, x_GPU, y_GPU);
}
cudaDeviceSynchronize();
double t2 = wctime(); //end time
//Copy result to CPU so it can be passed to saxpy_check
cudaMemcpy(z, y_GPU, vecLen * sizeof(float), cudaMemcpyDeviceToHost);
//Check error
float error = saxpy_check(vecLen, 1.0, x, y, z);
//Get performance stats: each element costs one multiply and one add (2 flops),
//and the kernel sweeps vecLen elements NITER times.
float flops = (2 * vecLen * NITER)/(t2 - t1);
printf("** vecLen = %d, Mflops = %.2f, err = %.2e\n", vecLen, flops*1e-6, error);
}
free(x);
free(y);
free(z);
cudaFree(x_GPU);
cudaFree(y_GPU);
return 0;
}
|
f7774f2e8ff4f42dc162aad29b7fbfa154158b22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "aabb.h"
#include "prefix_sum.h"
#include "utils.h"
/*
* Compute the axis-aligned bounding box,
 * but doing it per object: each thread is responsible
 * for computing the AABB of only one object (its own).
*/
__global__ void object_compute_bounding_box(const struct scene *const scene, struct AABB *aabbs)
{
size_t object_index = blockIdx.x * blockDim.x + threadIdx.x;
if (object_index >= scene->object_count) return; // Nothing to do here
// Create unbelievable point to replace them the first time.
vector3 min_point = make_float3(1000, 1000, 1000);
vector3 max_point = make_float3(-1000, -1000, -1000);
const struct object *const current_object = scene->objects + object_index;
for (uint32_t i = 0; i < current_object->triangle_count; ++i)
{
for (int j = 0; j < 3; ++j)
{
min_point.x = fmin(min_point.x, get_vertex(current_object->triangles, i)[j].x);
min_point.y = fmin(min_point.y, get_vertex(current_object->triangles, i)[j].y);
min_point.z = fmin(min_point.z, get_vertex(current_object->triangles, i)[j].z);
max_point.x = fmax(max_point.x, get_vertex(current_object->triangles, i)[j].x);
max_point.y = fmax(max_point.y, get_vertex(current_object->triangles, i)[j].y);
max_point.z = fmax(max_point.z, get_vertex(current_object->triangles, i)[j].z);
}
}
aabbs[object_index].min = min_point;
aabbs[object_index].max = max_point;
}
/* Layout dependent code */
# if defined(LAYOUT_AOS)
__device__ vector3* get_triangle_vertex(const struct scene *scene, uint32_t triangle_index)
{
return scene->objects_data.vertex_and_normal + triangle_index * 6;
}
# elif defined(LAYOUT_SOA)
__device__ vector3* get_triangle_vertex(const struct scene *scene, uint32_t triangle_index)
{
return scene->objects_data.vertex + triangle_index * 3;
}
# endif
/* End of layout dependent code */
/* Layout dependent code */
# if !defined(LAYOUT_FRAGMENTED)// LAYOUT_AOS || LAYOUT_SOA
# define AABB_TRIANGLE_BLOCK_SIZE 128
__global__ void fill_object_triangle_count(
const struct object *const objects,
size_t *objects_triangles_count,
size_t size)
{
size_t object_index = blockIdx.x * blockDim.x + threadIdx.x;
if (object_index >= size)
return; // Nothing to do here
objects_triangles_count[object_index] = objects[object_index].triangle_count;
}
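/* One thread per triangle: each thread locates its owning object by binary
   search on the prefix-summed triangle counts, folds its three vertices into a
   per-block shared-memory AABB slot (keyed by the object's first triangle in
   that block) using atomic float min/max, and the slot owners finally merge
   their partial boxes into the global per-object AABB, again atomically. */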
__global__ void triangle_compute_bounding_box(
const struct scene *const scene,
const size_t *const objects_triangles_count,
struct AABB *aabbs,
size_t size)
{
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= objects_triangles_count[size - 1])
return; // Nothing to do here
// Find the object containing the current triangle.
// Use a binary search, making the first steps
// essentially free: all threads touch the same memory cells,
// allowing for coalesced reads.
size_t object_index = binary_search(objects_triangles_count, size, index);
size_t first_triangle_index = (object_index == 0
? 0
: objects_triangles_count[object_index - 1]
);
size_t first_triangle_thread_index = (first_triangle_index < index - threadIdx.x
? index - threadIdx.x// First triangle index is in another block, only taking the first of the block.
: first_triangle_index// First triangle index is in this block.
) % AABB_TRIANGLE_BLOCK_SIZE;
__shared__ struct AABB shared_aabb[AABB_TRIANGLE_BLOCK_SIZE];
vector3 *vertex = get_triangle_vertex(scene, index);
if (threadIdx.x == first_triangle_thread_index)
{// The first triangles of the object (per thread) set a starting value.
shared_aabb[first_triangle_thread_index].min = vertex[0];
shared_aabb[first_triangle_thread_index].max = vertex[0];
}
if (index == first_triangle_index)
{// The first triangle per object (globally) set a starting value
aabbs[object_index].min = vertex[0];
aabbs[object_index].max = vertex[0];
}
__syncthreads();
// Locally perform the min max
for (uint8_t i = 0; i < 3; ++i)
{
atomicMinFloat(&shared_aabb[first_triangle_thread_index].min.x, vertex[i].x);
atomicMinFloat(&shared_aabb[first_triangle_thread_index].min.y, vertex[i].y);
atomicMinFloat(&shared_aabb[first_triangle_thread_index].min.z, vertex[i].z);
atomicMaxFloat(&shared_aabb[first_triangle_thread_index].max.x, vertex[i].x);
atomicMaxFloat(&shared_aabb[first_triangle_thread_index].max.y, vertex[i].y);
atomicMaxFloat(&shared_aabb[first_triangle_thread_index].max.z, vertex[i].z);
}
__syncthreads();
// Globally perform the min max
if (threadIdx.x == first_triangle_thread_index)
{
atomicMinFloat(&aabbs[object_index].min.x, shared_aabb[first_triangle_thread_index].min.x);
atomicMinFloat(&aabbs[object_index].min.y, shared_aabb[first_triangle_thread_index].min.y);
atomicMinFloat(&aabbs[object_index].min.z, shared_aabb[first_triangle_thread_index].min.z);
atomicMaxFloat(&aabbs[object_index].max.x, shared_aabb[first_triangle_thread_index].max.x);
atomicMaxFloat(&aabbs[object_index].max.y, shared_aabb[first_triangle_thread_index].max.y);
atomicMaxFloat(&aabbs[object_index].max.z, shared_aabb[first_triangle_thread_index].max.z);
}
}
# endif
/* End of layout dependent code */
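/* Host-side dispatcher: with the fragmented layout every object computes its own
   box in a single thread (object_compute_bounding_box); with the AoS/SoA layouts
   a prefix sum over the per-object triangle counts drives the per-triangle
   kernel above, parallelising the reduction over every triangle in the scene. */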
void compute_bounding_box(const struct scene *const scene, struct AABB *aabbs)
{
struct scene CPU_scene;
hipMemcpy(&CPU_scene, scene, sizeof(struct scene), hipMemcpyDefault);
/* Layout dependent code */
# if defined(LAYOUT_FRAGMENTED)
// Can't do any optimisations as the layout is fragmented,
// so triangles can't be directly accessed.
dim3 threadsPerBlock(32);
dim3 numBlocks(ceil(CPU_scene.object_count * 1.0 / threadsPerBlock.x));
hipLaunchKernelGGL(( object_compute_bounding_box), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, scene, aabbs);
# else // LAYOUT_AOS || LAYOUT_SOA
size_t *objects_triangles_count;
hipMalloc(&objects_triangles_count, sizeof(size_t) * CPU_scene.object_count);
// fill with objects triangle count
hipLaunchKernelGGL(( fill_object_triangle_count), dim3(ceil(CPU_scene.object_count * 1.0 / 128)), dim3(128), 0, 0,
CPU_scene.objects,
objects_triangles_count,
CPU_scene.object_count
);
// Perform a prefix sum on it so that each triangle
// knows to which object it belongs.
shared_prefix_sum(objects_triangles_count, CPU_scene.object_count);
// Get back the triangle count.
size_t triangles_count;
hipMemcpy(
&triangles_count,
objects_triangles_count + CPU_scene.object_count - 1,
sizeof(size_t), hipMemcpyDefault
);
// Fill the aabbs
hipLaunchKernelGGL(( triangle_compute_bounding_box),
dim3(ceil(triangles_count * 1.0 / AABB_TRIANGLE_BLOCK_SIZE)),
dim3(AABB_TRIANGLE_BLOCK_SIZE)
, 0, 0, scene, objects_triangles_count, aabbs, CPU_scene.object_count);
hipFree(objects_triangles_count);
# endif
/* End of layout dependent code */
}
// https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
__host__ __device__ bool hit_aabb(const struct AABB *const aabb, const struct ray *const ray)
{
float tmin, tmax, tymin, tymax, tzmin, tzmax;
vector3 inv_direction = make_float3(// If == zero, should map to infinity
1 / ray->direction.x,
1 / ray->direction.y,
1 / ray->direction.z
);
int sign_x = (inv_direction.x < 0);
int sign_y = (inv_direction.y < 0);
int sign_z = (inv_direction.z < 0);
tmin = ((sign_x ? aabb->max.x : aabb->min.x) - ray->origin.x) * inv_direction.x;
tmax = ((sign_x ? aabb->min.x : aabb->max.x) - ray->origin.x) * inv_direction.x;
tymin = ((sign_y ? aabb->max.y : aabb->min.y) - ray->origin.y) * inv_direction.y;
tymax = ((sign_y ? aabb->min.y : aabb->max.y) - ray->origin.y) * inv_direction.y;
if ((tmin > tymax) || (tymin > tmax))
return false;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = ((sign_z ? aabb->max.z : aabb->min.z) - ray->origin.z) * inv_direction.z;
tzmax = ((sign_z ? aabb->min.z : aabb->max.z) - ray->origin.z) * inv_direction.z;
if ((tmin > tzmax) || (tzmin > tmax))
return false;
/*
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
*/
return true;
}
| f7774f2e8ff4f42dc162aad29b7fbfa154158b22.cu | #include "aabb.h"
#include "prefix_sum.h"
#include "utils.h"
/*
* Compute the axis-aligned bounding box,
 * but doing it per object: each thread is responsible
 * for computing the AABB of only one object (its own).
*/
__global__ void object_compute_bounding_box(const struct scene *const scene, struct AABB *aabbs)
{
size_t object_index = blockIdx.x * blockDim.x + threadIdx.x;
if (object_index >= scene->object_count) return; // Nothing to do here
// Start from sentinel min/max points that any real vertex will replace.
vector3 min_point = make_float3(1000, 1000, 1000);
vector3 max_point = make_float3(-1000, -1000, -1000);
const struct object *const current_object = scene->objects + object_index;
for (uint32_t i = 0; i < current_object->triangle_count; ++i)
{
for (int j = 0; j < 3; ++j)
{
min_point.x = fmin(min_point.x, get_vertex(current_object->triangles, i)[j].x);
min_point.y = fmin(min_point.y, get_vertex(current_object->triangles, i)[j].y);
min_point.z = fmin(min_point.z, get_vertex(current_object->triangles, i)[j].z);
max_point.x = fmax(max_point.x, get_vertex(current_object->triangles, i)[j].x);
max_point.y = fmax(max_point.y, get_vertex(current_object->triangles, i)[j].y);
max_point.z = fmax(max_point.z, get_vertex(current_object->triangles, i)[j].z);
}
}
aabbs[object_index].min = min_point;
aabbs[object_index].max = max_point;
}
/* Layout dependent code */
# if defined(LAYOUT_AOS)
__device__ vector3* get_triangle_vertex(const struct scene *scene, uint32_t triangle_index)
{
return scene->objects_data.vertex_and_normal + triangle_index * 6;
}
# elif defined(LAYOUT_SOA)
__device__ vector3* get_triangle_vertex(const struct scene *scene, uint32_t triangle_index)
{
return scene->objects_data.vertex + triangle_index * 3;
}
# endif
/* End of layout dependent code */
/* Layout dependent code */
# if !defined(LAYOUT_FRAGMENTED)// LAYOUT_AOS || LAYOUT_SOA
# define AABB_TRIANGLE_BLOCK_SIZE 128
__global__ void fill_object_triangle_count(
const struct object *const objects,
size_t *objects_triangles_count,
size_t size)
{
size_t object_index = blockIdx.x * blockDim.x + threadIdx.x;
if (object_index >= size)
return; // Nothing to do here
objects_triangles_count[object_index] = objects[object_index].triangle_count;
}
__global__ void triangle_compute_bounding_box(
const struct scene *const scene,
const size_t *const objects_triangles_count,
struct AABB *aabbs,
size_t size)
{
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= objects_triangles_count[size - 1])
return; // Nothing to do here
    // Find the object containing the current triangle.
    // Use a binary search; its first steps are essentially free because
    // all triangles touch the same memory cells, allowing for coalesced reads.
size_t object_index = binary_search(objects_triangles_count, size, index);
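    /*
     * Editorial illustration (not in the original source), assuming binary_search
     * returns the index of the first inclusive prefix-sum entry strictly greater
     * than `index`: with per-object triangle counts {3, 2, 4} the prefix sums are
     * {3, 5, 9}, so triangle index 4 maps to object 1 because 3 <= 4 < 5.
     */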
size_t first_triangle_index = (object_index == 0
? 0
: objects_triangles_count[object_index - 1]
);
size_t first_triangle_thread_index = (first_triangle_index < index - threadIdx.x
? index - threadIdx.x// First triangle index is in another block, only taking the first of the block.
: first_triangle_index// First triangle index is in this block.
) % AABB_TRIANGLE_BLOCK_SIZE;
__shared__ struct AABB shared_aabb[AABB_TRIANGLE_BLOCK_SIZE];
vector3 *vertex = get_triangle_vertex(scene, index);
if (threadIdx.x == first_triangle_thread_index)
    {// The first triangle of each object within this block seeds the shared starting value.
shared_aabb[first_triangle_thread_index].min = vertex[0];
shared_aabb[first_triangle_thread_index].max = vertex[0];
}
if (index == first_triangle_index)
    {// The first triangle of each object (globally) seeds the global starting value.
aabbs[object_index].min = vertex[0];
aabbs[object_index].max = vertex[0];
}
__syncthreads();
// Locally perform the min max
for (uint8_t i = 0; i < 3; ++i)
{
atomicMinFloat(&shared_aabb[first_triangle_thread_index].min.x, vertex[i].x);
atomicMinFloat(&shared_aabb[first_triangle_thread_index].min.y, vertex[i].y);
atomicMinFloat(&shared_aabb[first_triangle_thread_index].min.z, vertex[i].z);
atomicMaxFloat(&shared_aabb[first_triangle_thread_index].max.x, vertex[i].x);
atomicMaxFloat(&shared_aabb[first_triangle_thread_index].max.y, vertex[i].y);
atomicMaxFloat(&shared_aabb[first_triangle_thread_index].max.z, vertex[i].z);
}
__syncthreads();
// Globally perform the min max
if (threadIdx.x == first_triangle_thread_index)
{
atomicMinFloat(&aabbs[object_index].min.x, shared_aabb[first_triangle_thread_index].min.x);
atomicMinFloat(&aabbs[object_index].min.y, shared_aabb[first_triangle_thread_index].min.y);
atomicMinFloat(&aabbs[object_index].min.z, shared_aabb[first_triangle_thread_index].min.z);
atomicMaxFloat(&aabbs[object_index].max.x, shared_aabb[first_triangle_thread_index].max.x);
atomicMaxFloat(&aabbs[object_index].max.y, shared_aabb[first_triangle_thread_index].max.y);
atomicMaxFloat(&aabbs[object_index].max.z, shared_aabb[first_triangle_thread_index].max.z);
}
}
# endif
/* End of layout dependent code */
void compute_bounding_box(const struct scene *const scene, struct AABB *aabbs)
{
struct scene CPU_scene;
cudaMemcpy(&CPU_scene, scene, sizeof(struct scene), cudaMemcpyDefault);
/* Layout dependent code */
# if defined(LAYOUT_FRAGMENTED)
// Can't do any optimisations as the layout is fragmented,
// so triangles can't be directly accessed.
dim3 threadsPerBlock(32);
dim3 numBlocks(ceil(CPU_scene.object_count * 1.0 / threadsPerBlock.x));
    // <<<grid, block>>>: the block count comes first, then the threads per block.
    object_compute_bounding_box<<<numBlocks, threadsPerBlock>>>(scene, aabbs);
# else // LAYOUT_AOS || LAYOUT_SOA
size_t *objects_triangles_count;
cudaMalloc(&objects_triangles_count, sizeof(size_t) * CPU_scene.object_count);
    // Fill with each object's triangle count.
fill_object_triangle_count<<<ceil(CPU_scene.object_count * 1.0 / 128), 128>>>(
CPU_scene.objects,
objects_triangles_count,
CPU_scene.object_count
);
    // Perform a prefix sum on it so that each triangle
    // knows to which object it belongs.
shared_prefix_sum(objects_triangles_count, CPU_scene.object_count);
// Get back the triangle count.
size_t triangles_count;
cudaMemcpy(
&triangles_count,
objects_triangles_count + CPU_scene.object_count - 1,
sizeof(size_t), cudaMemcpyDefault
);
// Fill the aabbs
triangle_compute_bounding_box<<<
ceil(triangles_count * 1.0 / AABB_TRIANGLE_BLOCK_SIZE),
AABB_TRIANGLE_BLOCK_SIZE
>>>(scene, objects_triangles_count, aabbs, CPU_scene.object_count);
cudaFree(objects_triangles_count);
# endif
/* End of layout dependent code */
}
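/*
 * Editorial usage sketch (not part of the original source): a minimal host-side
 * driver for the function above. It assumes `device_scene` already points to a
 * struct scene living in device memory and that `object_count` is known on the
 * host; both names are placeholders.
 */
static void compute_bounding_box_example(const struct scene *device_scene, size_t object_count)
{
    struct AABB *device_aabbs;
    cudaMalloc(&device_aabbs, sizeof(struct AABB) * object_count);
    compute_bounding_box(device_scene, device_aabbs);
    cudaDeviceSynchronize(); // the kernels launched above are asynchronous
    // ... read device_aabbs back or feed them to the next kernel here ...
    cudaFree(device_aabbs);
}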
// https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
__host__ __device__ bool hit_aabb(const struct AABB *const aabb, const struct ray *const ray)
{
float tmin, tmax, tymin, tymax, tzmin, tzmax;
vector3 inv_direction = make_float3(// If == zero, should map to infinity
1 / ray->direction.x,
1 / ray->direction.y,
1 / ray->direction.z
);
int sign_x = (inv_direction.x < 0);
int sign_y = (inv_direction.y < 0);
int sign_z = (inv_direction.z < 0);
tmin = ((sign_x ? aabb->max.x : aabb->min.x) - ray->origin.x) * inv_direction.x;
tmax = ((sign_x ? aabb->min.x : aabb->max.x) - ray->origin.x) * inv_direction.x;
tymin = ((sign_y ? aabb->max.y : aabb->min.y) - ray->origin.y) * inv_direction.y;
tymax = ((sign_y ? aabb->min.y : aabb->max.y) - ray->origin.y) * inv_direction.y;
if ((tmin > tymax) || (tymin > tmax))
return false;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = ((sign_z ? aabb->max.z : aabb->min.z) - ray->origin.z) * inv_direction.z;
tzmax = ((sign_z ? aabb->min.z : aabb->max.z) - ray->origin.z) * inv_direction.z;
if ((tmin > tzmax) || (tzmin > tmax))
return false;
/*
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
*/
return true;
}
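/*
 * Editorial usage sketch (not part of the original source): a self-contained
 * check of the slab test above. It assumes `vector3` is float3-compatible and
 * that `struct ray` exposes `origin` and `direction` members, as the
 * dereferences in hit_aabb suggest.
 */
__host__ __device__ static inline bool hit_aabb_example(void)
{
    struct AABB box;
    box.min = make_float3(0.0f, 0.0f, 0.0f);
    box.max = make_float3(1.0f, 1.0f, 1.0f);
    struct ray r;
    r.origin = make_float3(-1.0f, 0.5f, 0.5f); // start to the left of the unit box
    r.direction = make_float3(1.0f, 0.0f, 0.0f); // shoot straight through it along +x
    return hit_aabb(&box, &r); // expected: true (zero components map to infinite inverses)
}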
|
4d40e9f7ad9d68c8f40a9d655ce2267265fb0c41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "box2d2r-512-9-512_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_9_3;
float __reg_9_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(2, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(3, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(4, __reg_9_4);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(5, __reg_9_0);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(6, __reg_9_1);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(7, __reg_9_2);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(8, __reg_9_3);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(9, __reg_9_4);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(10, __reg_9_0);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(11, __reg_9_1);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(12, __reg_9_2);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(13, __reg_9_3);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(14, __reg_9_4);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(15, __reg_9_0);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(16, __reg_9_1);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(17, __reg_9_2);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__STORE(__h + 3, __reg_9_0);
}
}
else
{
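    // Tiles that do not reach the bottom boundary: stream rows in a loop unrolled by 5
    // (matching the 5-way register rotation); each __STORE lags the current __LOAD by
    // 18 rows, the depth of the 9-stage pipeline with a halo of 2 per stage.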
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
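    // Drain the tail one row at a time, returning as soon as the overlapped extent of
    // the tile (__side1LenOl rows) has been consumed.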
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
}
}
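// Variant of the same radius-2 2D stencil fused over 8 time steps per launch
// (__side0Len = 8, pipeline stages __CALC1..__CALC8); apart from the shallower
// pipeline and different tile sizes it follows the same structure as the kernel above.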
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
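  // Tile geometry: __side0Len time steps are fused per kernel launch; each thread block
  // covers __side1Len x __side2Len output points plus a halo overlap on every side.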
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
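  // Double-buffered shared-memory row buffer: __DB_SWITCH() flips between the two halves
  // each time a row is staged, so neighbour reads along c2 from one row cannot race with
  // the writes of the next row without requiring a second barrier.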
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
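  // Guard predicates: __loadValid/__updateValid bound the global column index, and
  // __writeValidK trims __halo2 * K columns from each side of the overlapped tile, so
  // only columns that remain valid through all 8 fused time steps reach __STORE.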
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
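  // __LOAD reads one input row from the (__c0 % 2) half of A; __CALCEXPR_0..4 apply the
  // five per-row coefficient sets of the radius-2 stencil along c2, accumulating into the
  // five in-flight output rows; __CALCk runs them at pipeline stage k (or forwards the
  // unmodified value when the column is not valid at that depth); __STORE writes a
  // finished row into the ((c0 + 1) % 2) half of A.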
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
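// Macro machinery: __LOAD reads one c1-plane element from the current time copy
// ((__c0 % 2) half of A); __CALCEXPR_0..4 are the five c1-rows of a 5x5 star stencil
// along c2 (offsets -2..+2 via the shared-memory row buffer), each accumulated into one
// of five partially finished output planes kept in flight by register rotation;
// __CALCk applies this for fused step k and, outside that step's valid region, simply
// passes the input through (out2 = reg); __STORE writes a finished stage-8 plane into
// the other time copy ((__c0 + 1) % 2).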
if (__c1Id == 0)
{
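// First block along c1: loads 0 and 1 feed all eight __CALCk at once, which presumably
// seeds the low-boundary planes into every stage; the rest of the prologue then fills
// the pipeline one plane at a time.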
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
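// First store for the boundary block: plane 2 is the earliest plane with a completed
// stage-8 value.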
__STORE(2, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(3, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(4, __reg_8_4);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(5, __reg_8_0);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(6, __reg_8_1);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(7, __reg_8_2);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(8, __reg_8_3);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(9, __reg_8_4);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(10, __reg_8_0);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(11, __reg_8_1);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(12, __reg_8_2);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(13, __reg_8_3);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(14, __reg_8_4);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(15, __reg_8_0);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
else
{
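// Other blocks along c1: warm the pipeline stage by stage; each __CALCk starts only once
// stage k-1 has produced enough planes, and the first store trails the load index by
// 16 planes (presumably 8 stages times a c1 halo of 2).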
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
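// First store for non-boundary blocks: with the pipeline primed through plane 32,
// plane 16 is the earliest completed output.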
__STORE(16, __reg_8_1);
}
__a_sb = __a_sb_double + __blockSize * 0;
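// Rewind the shared-memory double buffer to its first half before the steady-state loop.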
if (__c1Id == __side1Num - 1)
{
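// Last block along c1: run the steady-state loop only while several planes remain past
// the pipeline depth, then drain the remaining outputs with one of the specialized
// tails selected below.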
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
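// Steady state: each iteration streams five consecutive input planes (one full
// register-rotation period) and stores the five outputs lagging 16 planes behind __h.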
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
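// Drain the pipeline: depending on how many planes remain, one of the tails below
// finishes the last few outputs, presumably replicating the top-boundary plane through
// the remaining stages (note the repeated register arguments in the final __CALCk calls).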
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
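// Six input rows remain: same drain sequence as above with one more loaded row.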
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__STORE(__h + 3, __reg_8_1);
}
}
else
{
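// Pure steady-state streaming with no bottom-boundary drain: each loop iteration loads
// five rows, advances them through all eight fused time steps, and stores five results
// 16 rows behind the load position (eight fused steps x halo 2).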
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
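// Tail: finish any rows left over from the five-way unrolling one at a time, returning
// as soon as the end of the overlapped tile (__side1LenOl) is reached.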
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
}
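// kernel0_7: fuses 7 time steps (__side0Len = 7) of the 5x5, 25-coefficient stencil
// defined by __CALCEXPR_0..4 below. Each thread block streams a 512-row tile of 484
// interior columns, extended by 14 (= 7 * halo 2) overlap rows/columns on each side.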
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
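// __writeValidK marks the columns still valid after K fused steps: each step shrinks the
// usable range by halo2 at both ends of the thread block, and only values surviving all
// seven steps (__writeValid7) are stored back to global memory.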
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
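// A holds two time planes: __LOAD reads plane (c0 % 2) and __DEST (used by __STORE)
// writes the fused result into plane ((c0 + 1) % 2).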
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
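// __CALCEXPR scatters one input row into five partially accumulated outputs:
// __CALCEXPR_0 starts a new accumulation while __CALCEXPR_1..4 add into sums begun on
// the previous rows, so an output is complete once five consecutive rows have contributed.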
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
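// __CALCk applies fused time step k; outside that step's valid column range the input
// value is simply forwarded (out2 = reg).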
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
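// First block along c1: the tile starts at the top of the domain, so the register
// pipeline is primed from row 0 and the first result is stored at row 2.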
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(2, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(3, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(4, __reg_7_4);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(5, __reg_7_0);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(6, __reg_7_1);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(7, __reg_7_2);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(8, __reg_7_3);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(9, __reg_7_4);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(10, __reg_7_0);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(11, __reg_7_1);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(12, __reg_7_2);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(13, __reg_7_3);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
else
{
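// Other blocks along c1: prime the register pipeline on the leading overlap rows
// without storing; the first value written back is row 14.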
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
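// Last block along c1: run the steady-state loop only while the bottom boundary is still
// at least seven rows away, then drain via the epilogue branches below.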
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
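// Steady state: load five rows, push each through the seven register stages, and store
// five results 14 (= 7 * halo 2) rows behind the load position.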
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
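// Flip the shared-memory double buffer and resynchronize before the next five-row group.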
__DB_SWITCH(); __syncthreads();
}
if (0) {}
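// Bottom-boundary epilogues: exactly one branch matches the number of rows remaining and
// drains the seven-stage pipeline, storing the final outputs.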
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__STORE(__h + 3, __reg_7_2);
}
}
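/* Tiles other than the last along c1: stream to the end of the overlapped
   tile, then drain the remaining rows one at a time below. */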
else
{
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
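/* Drain: process the final rows singly, returning as soon as __h reaches the
   overlapped tile height (__side1LenOl). */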
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
}
}
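/* kernel0_6: same register-streaming scheme as the seven-stage variant above,
   but with six time steps fused per sweep (__side0Len = 6), so six register
   stages (__reg_1_* .. __reg_6_*) are kept in flight per thread. */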
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
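/* Helper macros for the streaming pipeline: __LOAD reads one row of A into a
   register (guarded against the c2 halo); each __CALCEXPR_k adds one input
   row's five-column contribution to the k-th of five in-flight output rows;
   __CALCn applies that update for fused time step n only where the thread is
   far enough from the tile edge (__writeValid1..__writeValid6), otherwise it
   just forwards the input; __STORE writes a row once all fused steps are
   complete (__storeValid). */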
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1))) + (0.03127f * (__REGREF(__a, 0))) + (0.03128f * (__SBREF(__a_sb, 1))) + (0.03129f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * (__SBREF(__a_sb, -2))) + (0.03131f * (__SBREF(__a_sb, -1))) + (0.03132f * (__REGREF(__a, 0))) + (0.03133f * (__SBREF(__a_sb, 1))) + (0.03134f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * (__SBREF(__a_sb, -2))) + (0.03136f * (__SBREF(__a_sb, -1))) + (0.24712f * (__REGREF(__a, 0))) + (0.03138f * (__SBREF(__a_sb, 1))) + (0.03139f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * (__SBREF(__a_sb, -2))) + (0.03141f * (__SBREF(__a_sb, -1))) + (0.03142f * (__REGREF(__a, 0))) + (0.03143f * (__SBREF(__a_sb, 1))) + (0.03144f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * (__SBREF(__a_sb, -2))) + (0.03146f * (__SBREF(__a_sb, -1))) + (0.03147f * (__REGREF(__a, 0))) + (0.03148f * (__SBREF(__a_sb, 1))) + (0.03149f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
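/* First tile along c1: rows 0 and 1 seed every time level directly so the
   fused steps start from the physical boundary; the first __STORE lands on
   row 2. */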
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(2, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(3, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(4, __reg_6_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(5, __reg_6_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(6, __reg_6_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(7, __reg_6_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(8, __reg_6_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(9, __reg_6_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(10, __reg_6_0);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(11, __reg_6_1);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
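/* Interior (non-first) tiles: prime the pipeline with plain loads; each deeper
   time level starts four rows later so it sees a full five-row window before
   producing output. */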
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
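/* Pipeline is now primed. The branches below run the steady-state sweep: each
   loop iteration loads five fresh rows, pushes them through the six
   register-pipeline stages (__CALC1..__CALC6), and stores fully updated rows
   twelve positions behind the load index -- consistent with six fused time
   steps at two halo rows per step. The first branch additionally drains the
   pipeline at the bottom edge of the grid (last tile along c1). */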
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
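/* Tail handling for the last tile: depending on how many rows remain
   (__h + 2 .. __h + 6 relative to the tile end), one branch of the chain below
   drains the pipeline. The repeated register arguments in these __CALC calls
   appear to redirect contributions belonging to rows beyond the boundary into
   a scratch register so they are simply discarded. */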
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__STORE(__h + 3, __reg_6_3);
}
}
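/* Tiles that are not the last along c1: keep streaming until the overlapped
   tile height __side1LenOl is exhausted; the early returns below stop the
   thread once all overlapped rows have been consumed. */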
else
{
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
}
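/* kernel0_5: the same generated radius-2 (5x5 box) stencil, but fusing five
   time steps per sweep (__side0Len = 5, pipeline stages __CALC1..__CALC5).
   The structure mirrors the kernel above: a prologue that primes the
   registers, a steady-state loop, and an epilogue that drains the trailing
   rows. */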
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
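/* One pipeline stage per fused time step: __CALCk applies the 5x5 stencil only
   when __writeValidk holds (the thread is at least k halo widths away from the
   tile edge in c2) and otherwise just forwards the centre value; __STORE
   commits a row only once all five stages are valid (__storeValid ==
   __writeValid5). The two branches below prime the pipeline: __c1Id == 0 is
   the tile touching the top boundary, the else branch an interior starting
   point along c1. */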
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(2, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(3, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(4, __reg_5_4);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(5, __reg_5_0);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(6, __reg_5_1);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(7, __reg_5_2);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(8, __reg_5_3);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(9, __reg_5_4);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
__a_sb = __a_sb_double + __blockSize * 1;
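  // Last tile along c1: run the unrolled steady-state loop, then fall through
  // to the remainder cases that drain the five pending CALC stages at the
  // bottom boundary.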
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
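  // Epilogue: the `if (0) {}` below only anchors the else-if chain; each
  // `__h + k` case handles one possible tail length and flushes the register
  // pipeline before the block finishes.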
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__STORE(__h + 3, __reg_5_4);
}
}
else
{
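    // Interior tiles: no bottom boundary to drain, so the loop simply runs to
    // __side1LenOl and the tail below returns once every row is consumed.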
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
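    // Tail of the interior-tile loop: handle the rows left over from the
    // five-way unrolling, checking after each one whether the overlapped
    // range has been exhausted.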
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
}
}
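// kernel0_4: variant of the kernel above that fuses four time steps per sweep
// (__side0Len = 4, stages CALC1..CALC4) over the same 5x5-coefficient stencil
// with a halo of 2; input and output are the two halves of A selected by the
// parity of c0.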
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
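  // __reg_0 holds the freshly loaded input element; each __reg_s_* group is a
  // five-deep rotating window of partially accumulated output rows for
  // pipeline stage s (one stage per fused time step).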
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
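  // Double-buffered shared-memory row: __DB_SWITCH flips between the two
  // halves so that staging the next row never overwrites values other threads
  // may still be reading.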
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
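  // Each fused step shrinks the range of columns that can be written by one
  // halo (__halo2); only results valid after all four steps (__writeValid4)
  // are stored back to A.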
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
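  // Row helpers: __LOAD reads one element of row h from the c0-parity half of
  // A; each __CALCEXPR_k adds that row's five column taps to one of the five
  // pending output rows; __CALCn stages the row in shared memory and either
  // applies all five row expressions or, outside its valid column range,
  // passes the raw value through unchanged.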
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
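  // Prologue: the first tile along c1 seeds the pipeline from the top
  // boundary rows and emits its first stores; every other tile only primes
  // its registers from the overlap region and stores a single row.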
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(2, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(3, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(4, __reg_4_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(5, __reg_4_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(6, __reg_4_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(7, __reg_4_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
__a_sb = __a_sb_double + __blockSize * 0;
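  // Last tile of kernel0_4: steady-state loop followed by remainder cases
  // that flush the four pending stages at the bottom boundary.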
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
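  // Same remainder scheme as in the kernel above, with one fewer stage to drain.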
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__STORE(__h + 3, __reg_4_0);
}
}
else
{
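    // Interior tiles of kernel0_4: same structure as the interior-tile loop
    // above, minus the fifth stage.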
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
}
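/* kernel0_3: variant that fuses __side0Len = 3 time steps per launch. Each
   thread streams along c1, keeping a 5-wide register window per fused step
   (__reg_1_* .. __reg_3_*) and a double-buffered shared-memory line
   (__a_sb_double, __DB_SWITCH) for the c2 halo exchange. */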
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(3, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(4, __reg_3_4);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(5, __reg_3_0);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__STORE(__h + 3, __reg_3_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
}
}
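/* kernel0_2: same streaming structure with __side0Len = 2 fused time steps,
   i.e. only two register-pipeline stages (__reg_1_* and __reg_2_*). */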
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(2, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(3, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__STORE(__h + 3, __reg_2_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
}
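/* kernel0_1: single time step per launch (__side0Len = 1); with one pipeline
   stage each loaded row is consumed and stored two rows behind the load. */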
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
}
else
{
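// Not the last tile: plain streaming; the early returns stop as soon as this
// tile's overlapped row range (__side1LenOl) is exhausted.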
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
}
}
| 4d40e9f7ad9d68c8f40a9d655ce2267265fb0c41.cu | #include "box2d2r-512-9-512_kernel.hu"
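/*
 * Machine-generated temporal-blocking stencil code (AN5D-style, judging by the
 * AN5D_TYPE macros and the kernel header include) for a radius-2 (5x5) 2D box
 * stencil.  kernel0_9 below fuses 9 time steps per launch; __sbref_wrap is a
 * tiny helper so the __SBREF macro can index the shared-memory row cache
 * through a function call.
 */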
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
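// kernel0_9: 9 fused time steps of the 5x5 box stencil.  Each thread block
// streams a 512-row tile along dim 1 while its 512 threads cover one row
// (476 interior columns plus an 18-column halo on each side).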
__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
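// Register pipeline: for each fused step k, __reg_k_0..__reg_k_4 hold the five
// output rows that the most recently processed input rows still contribute to;
// the five names are rotated instead of shifting values as new rows stream in.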
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_9_3;
float __reg_9_4;
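// Double-buffered shared-memory row cache: each stage publishes its current
// value here so neighbouring threads can read the +/-2 column window; the two
// halves are ping-ponged (__DB_SWITCH) between consecutive shared writes.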
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
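// Validity masks: every fused step consumes a 2-column halo, so the writable
// column range shrinks by 2 per step; only threads still valid after the 9th
// step (__writeValid9) store results to global memory.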
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
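// __LOAD reads one row of the current time plane (the (c0 % 2) half of A).
// The 5x5 stencil is split per input row: __CALCEXPR_k adds the weighted 1x5
// column window of the row just loaded to the k-th of the five output rows it
// touches, so a finished output row has accumulated five consecutive inputs.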
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
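// __CALCk advances fused step k; threads outside __writeValidk simply pass the
// value through (out2 = reg).  __STORE writes a fully updated row into the
// (c0 + 1) half of A.  The two branches below prime the pipeline differently
// for the first tile along dim 1 versus all later tiles.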
if (__c1Id == 0)
{
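// First tile: the top boundary rows are injected into every stage at once,
// and output rows 2..18 are emitted while the 9-stage pipeline fills.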
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(2, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(3, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(4, __reg_9_4);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(5, __reg_9_0);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(6, __reg_9_1);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(7, __reg_9_2);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(8, __reg_9_3);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(9, __reg_9_4);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(10, __reg_9_0);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(11, __reg_9_1);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(12, __reg_9_2);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(13, __reg_9_3);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(14, __reg_9_4);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(15, __reg_9_0);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(16, __reg_9_1);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(17, __reg_9_2);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
else
{
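// Later tiles: re-read the inter-tile overlap to refill the pipeline; the
// first row stored by this tile is at offset 18.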
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
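// Last tile along dim 1: run the steady-state loop, then drain the pipeline
// against the bottom boundary in the else-if chain that follows it.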
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
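// Steady state: load one new row, advance all 9 stages, and store the finished
// row 18 positions behind; unrolled 5x to match the register-rotation period.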
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
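// Pipeline drain: dispatch on how many rows remain past the last full
// iteration and flush the in-flight rows, substituting saved register copies
// for rows beyond the bottom boundary.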
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
}
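// Pipeline-drain epilogue (continued): each `else if` below covers a different number of rows
// remaining before this block's end of the c1 range (__h + 3 .. __h + 6). The last rows are
// loaded and the __CALC1..__CALC9 register pipeline is flushed, storing the lagging outputs
// from __h - 18 up to the block boundary.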
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__STORE(__h + 3, __reg_9_0);
}
}
else
{
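// Steady-state streaming loop: five rows per iteration. Each row is loaded into __reg_0,
// pushed through the nine __CALC stages, and the final-stage result is stored 18 (= 2 * 9)
// rows behind the streaming front; the shared-memory row buffer is swapped (__DB_SWITCH)
// and threads synchronize once per iteration.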
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
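// Tail: the remaining rows (fewer than five) are processed one at a time,
// returning as soon as __h reaches __side1LenOl.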
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
}
}
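// kernel0_8: same 5x5 stencil streamed along c1, but fusing 8 time steps per launch
// (__side0Len = 8) with an 8-stage register pipeline (__CALC1..__CALC8); only threads
// passing __writeValid8 (== __storeValid) write results.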
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
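// Per-row streaming macros: __LOAD reads one element of the current time plane (A is
// ping-ponged on c0 % 2) into a register; __CALCEXPR_0..4 accumulate the five row
// contributions of the 5x5 stencil, using shared memory for the c2 neighbors; __CALCk
// applies the k-th fused time step, guarded by __writeValidk so the valid c2 region
// shrinks by one halo per step (invalid threads just pass the value through); __STORE
// writes the final-step result into the opposite time plane.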
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
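// First block along c1 (__c1Id == 0): prologue that initializes every pipeline stage
// from the first rows (the lower halo) before entering the streaming phase.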
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(2, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(3, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(4, __reg_8_4);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(5, __reg_8_0);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(6, __reg_8_1);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(7, __reg_8_2);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(8, __reg_8_3);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(9, __reg_8_4);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(10, __reg_8_0);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(11, __reg_8_1);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(12, __reg_8_2);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(13, __reg_8_3);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(14, __reg_8_4);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(15, __reg_8_0);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
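// Tiles other than the first along dimension 1: the same eight-stage pipeline is primed
// from real halo planes instead of a clamped edge, so only the last warm-up plane
// (index 16) is stored before entering the steady-state loop.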
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
__a_sb = __a_sb_double + __blockSize * 0;
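// Last tile along dimension 1: run the steady-state loop, then drain the pipeline in the
// epilogue below; the repeated-register arguments in the epilogue's __CALCk calls appear
// to replicate the edge plane to close out the bottom boundary.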
if (__c1Id == __side1Num - 1)
{
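// Steady state: each iteration loads five new input planes and stores five output
// planes, each store lagging its load index by 16 (the pipeline depth).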
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
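// Epilogue: the else-if chain below selects how many planes remain past the steady-state
// loop and drains each pipeline stage in turn; the leading "if (0) {}" looks like a
// generator artifact that keeps the chain uniform.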
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__STORE(__h + 3, __reg_8_1);
}
}
else
{
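    // Interior tiles (not the last tile along c1): unrolled steady-state sweep.
    // Each iteration loads one new row, pushes it through the eight pipeline
    // stages (__CALC1..__CALC8), and stores the fully updated row 16 rows
    // behind the current load position (__STORE(__h - 16, ...)).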
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
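    // Drain: up to five rows of the overlapped region remain after the unrolled
    // loop; each is processed individually and the kernel returns as soon as
    // __h reaches __side1LenOl.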
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
}
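/* kernel0_7: same register-pipelined stencil sweep as the kernels above, but with
 * __side0Len = 7, i.e. seven pipeline stages (__reg_1_* .. __reg_7_*), presumably
 * seven fused time steps per sweep.  Judging from the __CALCEXPR_* macros below,
 * each stage applies a radius-2 (5x5-coefficient) 2D stencil read from the
 * double-buffered shared-memory row __a_sb. */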
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
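    // Double-buffered shared-memory row: one half holds the row being consumed,
    // the other the row being produced; __DB_SWITCH() flips __a_sb between them.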
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
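    // The macros below implement the load/compute/store pipeline:
    //   __LOAD  - fetch one row of A (time plane __c0 % 2) into a register,
    //   __CALCn - stage n of the pipeline; each __CALCEXPR_k accumulates one row
    //             of the radius-2 stencil from the shared-memory row __a_sb,
    //   __STORE - write a fully updated value into the (__c0 + 1) % 2 time plane,
    //             guarded by __storeValid so only threads with a full halo write.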
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
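    // Prologue.  The first tile along c1 (__c1Id == 0) clamps the pipeline at the
    // top boundary by feeding the first two (halo) rows into every stage at once;
    // other tiles instead prime the stages one at a time from streamed-in rows.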
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(2, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(3, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(4, __reg_7_4);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(5, __reg_7_0);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(6, __reg_7_1);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(7, __reg_7_2);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(8, __reg_7_3);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(9, __reg_7_4);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(10, __reg_7_0);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(11, __reg_7_1);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(12, __reg_7_2);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(13, __reg_7_3);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
else
{
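    // Interior or last tile: no top-boundary clamping.  Rows 0..28 are streamed
    // in, priming the seven stages progressively; the first store lands at row 14
    // (14 = 2 * 7 rows behind the last loaded row).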
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
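    // Last tile along c1: run the unrolled sweep only while enough rows remain,
    // then fall through to the remainder cases below, which flush the pipeline
    // against the bottom boundary.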
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
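    // Remainder dispatch: exactly one of the cases below matches the number of
    // rows left in this tile (__h + k == __c1Len - __side1Len * __c1Id + 2 * __halo1)
    // and drains the remaining stages with boundary-clamped __CALCn calls.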
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__STORE(__h + 3, __reg_7_2);
}
}
else
{
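/* Tiles that are not the last along c1: stream rows in groups of five (one per
   register-rotation phase) with no bottom-boundary handling, swapping the
   shared-memory double buffer after each group. */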
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
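/* Remainder rows after the unrolled-by-five loop; return as soon as the
   overlapped tile height __side1LenOl is reached. */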
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
}
}
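/* kernel0_6: generated (AN5D-style) variant that advances the stencil by six
   time steps per launch (__side0Len = 6). The update reads a 5x5 coefficient
   window (radius-2 halo in both c1 and c2); each thread block owns a
   512-row (c1) by 488-column (c2) tile plus overlap and keeps a six-stage
   software pipeline of five rotating row registers per stage, so stores lag
   loads by 2 * 6 = 12 rows. */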
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
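/* Per-row helper macros: __LOAD reads one c1 row of the current time copy of A
   into a register; each __CALCEXPR_k applies one row of the 5x5 coefficient
   window along c2, pulling neighbours from the double-buffered shared-memory
   line __a_sb; __CALCn gates the full 25-tap accumulation on __writeValidn so
   stage n only computes where its shrunken c2 halo is still valid and
   otherwise just forwards the input register; __STORE writes a finished row
   into the other time copy of A. */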
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
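/* First tile along c1: the top boundary rows 0 and 1 are fed to every pipeline
   stage up front, then rows 2..24 fill the six stages; results start draining
   at output row 2 (= __halo1). */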
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(2, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(3, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(4, __reg_6_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(5, __reg_6_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(6, __reg_6_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(7, __reg_6_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(8, __reg_6_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(9, __reg_6_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(10, __reg_6_0);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(11, __reg_6_1);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
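/* Interior tiles: prime the same six-stage pipeline from 25 overlapped rows
   read out of the halo region; the first store lands at row 12 once every
   stage is full. */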
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
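/* Prologue done: outputs now trail loads by 12 rows (6 fused steps x halo 2).
   Reset the shared-memory tile to the first buffer half and stream the rest of the block. */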
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
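// Steady state: each pass loads five consecutive rows and emits five results twelve rows behind the load front (__h - 12).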
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
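/* Bottom-boundary epilogue for the last block along c1: between two and six rows remain
   ahead of the store front; the repeated register arguments stand in for rows past the
   boundary while the pipeline is flushed. */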
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__STORE(__h + 3, __reg_6_3);
}
}
else
{
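// Interior (non-last) blocks: no bottom boundary to treat specially; keep streaming and return once __h reaches the overlapped extent __side1LenOl.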
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
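// Fewer than five rows left: finish them one at a time, returning as soon as __side1LenOl is reached.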
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
}
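/* kernel0_5: same 5x5 (radius-2) weighted stencil as the kernels above, but fusing
   __side0Len = 5 time steps per launch. Rows (c1) are streamed through the rotating
   registers __reg_k_0..__reg_k_4, one register bank per fused step, while the column
   (c2) neighbourhood is exchanged through the shared-memory tile __a_sb. Input and
   output planes are the two halves of A, selected by c0 % 2. */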
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
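/* __LOAD reads row h of the current time plane (c0 % 2); each __CALCk stages the row in
   shared memory for the column neighbourhood and applies one fused time step, but only
   for threads far enough from the tile edge (__writeValidk); the rest pass the value
   through unchanged. __STORE writes a fully updated row into the (c0 + 1) % 2 plane. */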
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
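/* Prologue: the first block along c1 sits on the physical top boundary and can start
   emitting results at row 2; every other block first reloads the 2 * 5 = 10 overlap rows
   shared with its upper neighbour and produces its first row at 10. */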
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(2, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(3, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(4, __reg_5_4);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(5, __reg_5_0);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(6, __reg_5_1);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(7, __reg_5_2);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(8, __reg_5_3);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(9, __reg_5_4);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
else
{
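// Interior block: rows 0..20 are loaded only to refill the register pipeline; the first row actually written is row 10.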
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
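/* Prologue done: outputs now trail loads by 10 rows (5 fused steps x halo 2).
   Point __a_sb at the second buffer half and stream the remaining rows. */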
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
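// Bottom-boundary epilogue for the last block: flush the rows still held in registers, clamping neighbours that fall past the boundary.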
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__STORE(__h + 3, __reg_5_4);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
}
}
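/* NOTE (added annotation, not part of the generated output): kernel0_4 below
   appears to be an auto-generated, temporally-blocked variant of the same
   halo-2 stencil. __side0Len = 4 suggests four consecutive time steps are
   fused per kernel launch: the intermediate time levels live in the rotating
   __reg_1_* .. __reg_4_* registers, each __CALCEXPR_0..__CALCEXPR_4 adds one
   row's contribution (five c2-neighbours via __SBREF(-2..2)), and only the
   final time level is written back to global memory by __STORE. */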
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
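/* NOTE (added annotation): the control flow below follows the usual pattern of
   this generator — a prologue that primes the register pipeline (the
   __c1Id == 0 branch handles the first tile along c1, the else branch an
   interior tile), a steady-state loop that loads one row, advances all four
   fused time levels and stores one finished row per iteration, and drain
   branches (__c1Id == __side1Num - 1) that flush the pipeline at the tile's
   far boundary. This is a descriptive reading of the generated code, not an
   authoritative statement of the generator's design. */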
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(2, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(3, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(4, __reg_4_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(5, __reg_4_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(6, __reg_4_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(7, __reg_4_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__STORE(__h + 3, __reg_4_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
}
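// Generated stencil kernel (see the AN5D_TYPE macros): __side0Len = 3, i.e. three
// stencil time steps appear to be fused per sweep via the __reg_1_*, __reg_2_* and
// __reg_3_* register pipelines before results reach __STORE.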
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(3, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(4, __reg_3_4);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(5, __reg_3_0);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__STORE(__h + 3, __reg_3_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
}
}
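// Same generated structure as kernel0_3, but with __side0Len = 2: only two register
// pipelines (__reg_1_*, __reg_2_*), so two time steps appear to be fused per sweep.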
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(2, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(3, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__STORE(__h + 3, __reg_2_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
}
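// Simplest generated variant: __side0Len = 1, a single __CALC1 stage per loaded row,
// so each sweep appears to advance the stencil by one time step.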
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
}
}
|
9ddbca8f722560a939c3c786a0df2056acbaf65b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <time.h>
#include <float.h>
#include <hiprand/hiprand_kernel.h>
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"
#include "device_launch_parameters.h"
#include "display.h"
#include <thread>
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(hipError_t result, char const* const func, const char* const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
__device__ vec3 color(const ray& r, hitable** world, hiprandState_t* local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0, 1.0, 1.0);
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.1f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0, 0.0, 0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__global__ void rand_init(hiprandState_t* rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprand_init(1984, 0, 0, rand_state);
}
}
__global__ void render_init(int max_x, int max_y, hiprandState_t* rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
    //Each thread gets the same seed, a different sequence number, and no offset
hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
__global__ void render(vec3* fb, int max_x, int max_y, int ns, camera** cam, hitable** world, hiprandState_t* rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
hiprandState_t local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
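    // gamma-correct the averaged color with gamma = 2 (take the square root of each channel)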
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
#define RND (hiprand_uniform(&local_rand_state))
__global__ void create_world(hitable** d_list, hitable** d_world, camera** d_camera, int width, int height, hiprandState_t* rand_state, int move, int c_move) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
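        // Build the random scene on a single thread: a large ground sphere,
        // a 22x22 grid of small spheres with randomly chosen materials
        // (diffuse / metal / glass), and three large feature spheres.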
hiprandState_t local_rand_state = *rand_state;
d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for (int a = -11; a < 11; a++) {
for (int b = -11; b < 11; b++) {
float choose_mat = RND;
vec3 center(a + RND, 0.2, b + RND);
if (choose_mat < 0.8f) {
d_list[i++] = new sphere(center, 0.2,
new lambertian(vec3(RND * RND, RND * RND, RND * RND)));
}
else if (choose_mat < 0.95f) {
d_list[i++] = new sphere(center, 0.2,
new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f * RND));
}
else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new hitable_list(d_list, 22 * 22 + 1 + 3);
//vec3 lookfrom(13, 2, 3);
//vec3 lookat(0, 0, 0);
//float dist_to_focus = 10.0; //(lookfrom - lookat).length();
//float aperture = 0.01;
//*d_camera = new camera(lookfrom,
// lookat,
// vec3(0, 1, 0),
// 20.0,
// float(width) / float(height),
// aperture,
// dist_to_focus);
//vec3 lookfrom(move, 2, c_move);
//vec3 lookat(0, 0, 0);
//float dist_to_focus = 10.0; //(lookfrom - lookat).length();
//float aperture = 0.01;
//*d_camera = new camera(lookfrom,
// lookat,
// vec3(0, 1, 0),
// 20.0,
// float(width) / float(height),
// aperture,
// dist_to_focus);
}
}
__global__ void move_camera(camera** d_camera, int move, int c_move, int width, int height) {
vec3 lookfrom(move, 2, c_move);
vec3 lookat(0, 0, 0);
float dist_to_focus = 10.0; //(lookfrom - lookat).length();
float aperture = 0.01;
*d_camera = new camera(lookfrom,
lookat,
vec3(0, 1, 0),
20.0,
float(width) / float(height),
aperture,
dist_to_focus);
}
__global__ void free_world(hitable** d_list, hitable** d_world, camera** d_camera) {
for (int i = 0; i < 22 * 22 + 1 + 3; i++) {
delete ((sphere*)d_list[i])->mat_ptr;
delete d_list[i];
}
delete* d_world;
delete* d_camera;
}
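// Renders one frame into fb using the same steps as the loop in main() below;
// not called from main in this version (presumably meant for a worker thread, given the <thread> include).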
void foo(int width, int height, int tx, int ty, int ns, display* window, vec3* fb, camera** d_camera, hitable** d_world, hiprandState_t* d_rand_state) {
//Render Scene
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(width / tx + 1, height / ty + 1);
dim3 threads(tx, ty);
hipLaunchKernelGGL(( render_init) , dim3(blocks), dim3(threads) , 0, 0, width, height, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( render) , dim3(blocks), dim3(threads) , 0, 0, fb, width, height, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
}
int main(int argc, char* argv[]) {
int width = 200;
int height = 100;
int ns = 10;
int tx = 16;
int ty = 16;
int d_move = 13;
int c_move = 3;
std::cerr << "Rendering a " << width << "x" << height << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = width * height;
size_t fb_size = num_pixels * sizeof(vec3);
// allocate FB
vec3* fb;
checkCudaErrors(hipMallocManaged((void**)&fb, fb_size));
// allocate random state
hiprandState_t* d_rand_state;
checkCudaErrors(hipMalloc((void**)&d_rand_state, num_pixels * sizeof(hiprandState_t)));
hiprandState_t* d_rand_state2;
checkCudaErrors(hipMalloc((void**)&d_rand_state2, 1 * sizeof(hiprandState_t)));
// we need that 2nd random state to be initialized for the world creation
hipLaunchKernelGGL(( rand_init) , dim3(1), dim3(1) , 0, 0, d_rand_state2);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// make our world of hitables & the camera
hitable** d_list;
int num_hitables = 22 * 22 + 1 + 3;
checkCudaErrors(hipMalloc((void**)&d_list, num_hitables * sizeof(hitable*)));
hitable** d_world;
checkCudaErrors(hipMalloc((void**)&d_world, sizeof(hitable*)));
camera** d_camera;
checkCudaErrors(hipMalloc((void**)&d_camera, sizeof(camera*)));
create_world << <1, 1 >> > (d_list, d_world, d_camera, width, height, d_rand_state2, d_move, c_move);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// Output FB as Image
display* window;
window = new display("joking", width, height);
while (window->get_status()) {
SDL_Event e;
while (SDL_PollEvent(&e) != 0)
{
if (e.type == SDL_QUIT)
{
window->close();
}
}
window->clear_render();
move_camera << <1, 1 >> > (d_camera, d_move, c_move, width, height);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//Render Scene
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(width / tx + 1, height / ty + 1);
dim3 threads(tx, ty);
render_init << <blocks, threads >> > (width, height, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
render << <blocks, threads >> > (fb, width, height, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
window->update(fb);
d_move = d_move + 1;
c_move = c_move - 2;
std::cerr << d_move << "\n";
std::cerr << c_move << "\n";
}
// clean up
checkCudaErrors(hipDeviceSynchronize());
free_world << <1, 1 >> > (d_list, d_world, d_camera);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_camera));
checkCudaErrors(hipFree(d_world));
checkCudaErrors(hipFree(d_list));
checkCudaErrors(hipFree(d_rand_state));
checkCudaErrors(hipFree(fb));
hipDeviceReset();
return 0;
} | 9ddbca8f722560a939c3c786a0df2056acbaf65b.cu | #include <iostream>
#include <fstream>
#include <time.h>
#include <float.h>
#include <curand_kernel.h>
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"
#include "device_launch_parameters.h"
#include "display.h"
#include <thread>
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(cudaError_t result, char const* const func, const char* const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
__device__ vec3 color(const ray& r, hitable** world, curandState* local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0, 1.0, 1.0);
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.1f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0, 0.0, 0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__global__ void rand_init(curandState* rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curand_init(1984, 0, 0, rand_state);
}
}
__global__ void render_init(int max_x, int max_y, curandState* rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
    //Each thread gets the same seed, a different sequence number, and no offset
curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
__global__ void render(vec3* fb, int max_x, int max_y, int ns, camera** cam, hitable** world, curandState* rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j * max_x + i;
curandState local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
#define RND (curand_uniform(&local_rand_state))
__global__ void create_world(hitable** d_list, hitable** d_world, camera** d_camera, int width, int height, curandState* rand_state, int move, int c_move) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curandState local_rand_state = *rand_state;
d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for (int a = -11; a < 11; a++) {
for (int b = -11; b < 11; b++) {
float choose_mat = RND;
vec3 center(a + RND, 0.2, b + RND);
if (choose_mat < 0.8f) {
d_list[i++] = new sphere(center, 0.2,
new lambertian(vec3(RND * RND, RND * RND, RND * RND)));
}
else if (choose_mat < 0.95f) {
d_list[i++] = new sphere(center, 0.2,
new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f * RND));
}
else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new hitable_list(d_list, 22 * 22 + 1 + 3);
//vec3 lookfrom(13, 2, 3);
//vec3 lookat(0, 0, 0);
//float dist_to_focus = 10.0; //(lookfrom - lookat).length();
//float aperture = 0.01;
//*d_camera = new camera(lookfrom,
// lookat,
// vec3(0, 1, 0),
// 20.0,
// float(width) / float(height),
// aperture,
// dist_to_focus);
//vec3 lookfrom(move, 2, c_move);
//vec3 lookat(0, 0, 0);
//float dist_to_focus = 10.0; //(lookfrom - lookat).length();
//float aperture = 0.01;
//*d_camera = new camera(lookfrom,
// lookat,
// vec3(0, 1, 0),
// 20.0,
// float(width) / float(height),
// aperture,
// dist_to_focus);
}
}
__global__ void move_camera(camera** d_camera, int move, int c_move, int width, int height) {
vec3 lookfrom(move, 2, c_move);
vec3 lookat(0, 0, 0);
float dist_to_focus = 10.0; //(lookfrom - lookat).length();
float aperture = 0.01;
*d_camera = new camera(lookfrom,
lookat,
vec3(0, 1, 0),
20.0,
float(width) / float(height),
aperture,
dist_to_focus);
}
__global__ void free_world(hitable** d_list, hitable** d_world, camera** d_camera) {
for (int i = 0; i < 22 * 22 + 1 + 3; i++) {
delete ((sphere*)d_list[i])->mat_ptr;
delete d_list[i];
}
delete* d_world;
delete* d_camera;
}
void foo(int width, int height, int tx, int ty, int ns, display* window, vec3* fb, camera** d_camera, hitable** d_world, curandState* d_rand_state) {
//Render Scene
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(width / tx + 1, height / ty + 1);
dim3 threads(tx, ty);
render_init <<<blocks, threads >>> (width, height, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
render <<<blocks, threads >>> (fb, width, height, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
}
int main(int argc, char* argv[]) {
int width = 200;
int height = 100;
int ns = 10;
int tx = 16;
int ty = 16;
int d_move = 13;
int c_move = 3;
std::cerr << "Rendering a " << width << "x" << height << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = width * height;
size_t fb_size = num_pixels * sizeof(vec3);
// allocate FB
vec3* fb;
checkCudaErrors(cudaMallocManaged((void**)&fb, fb_size));
// allocate random state
curandState* d_rand_state;
checkCudaErrors(cudaMalloc((void**)&d_rand_state, num_pixels * sizeof(curandState)));
curandState* d_rand_state2;
checkCudaErrors(cudaMalloc((void**)&d_rand_state2, 1 * sizeof(curandState)));
// we need that 2nd random state to be initialized for the world creation
rand_init <<<1, 1 >>> (d_rand_state2);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// make our world of hitables & the camera
hitable** d_list;
int num_hitables = 22 * 22 + 1 + 3;
checkCudaErrors(cudaMalloc((void**)&d_list, num_hitables * sizeof(hitable*)));
hitable** d_world;
checkCudaErrors(cudaMalloc((void**)&d_world, sizeof(hitable*)));
camera** d_camera;
checkCudaErrors(cudaMalloc((void**)&d_camera, sizeof(camera*)));
create_world << <1, 1 >> > (d_list, d_world, d_camera, width, height, d_rand_state2, d_move, c_move);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Output FB as Image
display* window;
window = new display("joking", width, height);
while (window->get_status()) {
SDL_Event e;
while (SDL_PollEvent(&e) != 0)
{
if (e.type == SDL_QUIT)
{
window->close();
}
}
window->clear_render();
move_camera << <1, 1 >> > (d_camera, d_move, c_move, width, height);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//Render Scene
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(width / tx + 1, height / ty + 1);
dim3 threads(tx, ty);
render_init << <blocks, threads >> > (width, height, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
render << <blocks, threads >> > (fb, width, height, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
window->update(fb);
d_move = d_move + 1;
c_move = c_move - 2;
std::cerr << d_move << "\n";
std::cerr << c_move << "\n";
}
// clean up
checkCudaErrors(cudaDeviceSynchronize());
free_world << <1, 1 >> > (d_list, d_world, d_camera);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_camera));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_rand_state));
checkCudaErrors(cudaFree(fb));
cudaDeviceReset();
return 0;
} |
84d1bbe300d3b3bcb15b6c37b54c56d41d1c2c39.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "srad.h"
#include <sys/time.h>
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
// includes, project
#include <hip/hip_runtime.h>
// includes, kernels
#include "srad_kernel.hip"
void random_matrix(float *I, int rows, int cols);
void runTest( int argc, char** argv);
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]);
fprintf(stderr, "\t<rows> - number of rows\n");
fprintf(stderr, "\t<cols> - number of cols\n");
fprintf(stderr, "\t<y1> - y1 value of the speckle\n");
fprintf(stderr, "\t<y2> - y2 value of the speckle\n");
fprintf(stderr, "\t<x1> - x1 value of the speckle\n");
fprintf(stderr, "\t<x2> - x2 value of the speckle\n");
fprintf(stderr, "\t<lamda> - lambda (0,1)\n");
fprintf(stderr, "\t<no. of iter> - number of iterations\n");
fprintf(stderr, "\t<Used Device> - GPU Device ID\n");
exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
runTest( argc, argv);
return EXIT_SUCCESS;
}
void
runTest( int argc, char** argv)
{
int rows, cols, size_I, size_R, niter = 10, iter;
int dev_id = 0;
float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ;
#ifdef CPU
float Jc, G2, L, num, den, qsqr;
int *iN,*iS,*jE,*jW, k;
float *dN,*dS,*dW,*dE;
float cN,cS,cW,cE,D;
#endif
#ifdef GPU
float *J_cuda;
float *C_cuda;
float *E_C, *W_C, *N_C, *S_C;
#endif
unsigned int r1, r2, c1, c2;
float *c;
if (argc == 10)
{
rows = atoi(argv[1]); //number of rows in the domain
cols = atoi(argv[2]); //number of cols in the domain
if ((rows%16!=0) || (cols%16!=0)){
fprintf(stderr, "rows and cols must be multiples of 16\n");
exit(1);
}
r1 = atoi(argv[3]); //y1 position of the speckle
r2 = atoi(argv[4]); //y2 position of the speckle
c1 = atoi(argv[5]); //x1 position of the speckle
c2 = atoi(argv[6]); //x2 position of the speckle
lambda = atof(argv[7]); //Lambda value
niter = atoi(argv[8]); //number of iterations
dev_id = atoi(argv[9]); //Used device
}
else{
usage(argc, argv);
}
hipDeviceProp_t prop;
printf("Device ID is %d \n", dev_id);
printf("Choosing CUDA Device...\n");
hipError_t set_result = hipSetDevice(dev_id);
printf("Set Result is: %s\n", hipGetErrorString(set_result));
hipGetDevice(&dev_id);
hipGetDeviceProperties(&prop, dev_id);
printf("Name: %s\n", prop.name);
printf("Running %d iterations.\n", niter);
size_I = cols * rows;
size_R = (r2-r1+1)*(c2-c1+1);
I = (float *)malloc( size_I * sizeof(float) );
J = (float *)malloc( size_I * sizeof(float) );
c = (float *)malloc(sizeof(float)* size_I) ;
#ifdef CPU
	iN = (int *)malloc(sizeof(int) * rows) ;
	iS = (int *)malloc(sizeof(int) * rows) ;
	jW = (int *)malloc(sizeof(int) * cols) ;
	jE = (int *)malloc(sizeof(int) * cols) ;
dN = (float *)malloc(sizeof(float)* size_I) ;
dS = (float *)malloc(sizeof(float)* size_I) ;
dW = (float *)malloc(sizeof(float)* size_I) ;
dE = (float *)malloc(sizeof(float)* size_I) ;
for (int i=0; i< rows; i++) {
iN[i] = i-1;
iS[i] = i+1;
}
for (int j=0; j< cols; j++) {
jW[j] = j-1;
jE[j] = j+1;
}
iN[0] = 0;
iS[rows-1] = rows-1;
jW[0] = 0;
jE[cols-1] = cols-1;
#endif
#ifdef GPU
//Allocate device memory
hipMalloc((void**)& J_cuda, sizeof(float)* size_I);
hipMalloc((void**)& C_cuda, sizeof(float)* size_I);
hipMalloc((void**)& E_C, sizeof(float)* size_I);
hipMalloc((void**)& W_C, sizeof(float)* size_I);
hipMalloc((void**)& S_C, sizeof(float)* size_I);
hipMalloc((void**)& N_C, sizeof(float)* size_I);
#endif
printf("Randomizing the input matrix\n");
//Generate a random matrix
random_matrix(I, rows, cols);
for (int k = 0; k < size_I; k++ ) {
J[k] = (float)exp(I[k]) ;
}
printf("Start the SRAD main loop\n");
double start = gettime();
for (iter=0; iter< niter; iter++){
sum=0; sum2=0;
for (int i=r1; i<=r2; i++) {
for (int j=c1; j<=c2; j++) {
tmp = J[i * cols + j];
sum += tmp ;
sum2 += tmp*tmp;
}
}
meanROI = sum / size_R;
varROI = (sum2 / size_R) - meanROI*meanROI;
q0sqr = varROI / (meanROI*meanROI);
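		// q0sqr: squared coefficient of variation of the homogeneous ROI,
		// used below as the speckle reference in the diffusion coefficient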
#ifdef CPU
for (int i = 0 ; i < rows ; i++) {
for (int j = 0; j < cols; j++) {
k = i * cols + j;
Jc = J[k];
				// directional derivatives
dN[k] = J[iN[i] * cols + j] - Jc;
dS[k] = J[iS[i] * cols + j] - Jc;
dW[k] = J[i * cols + jW[j]] - Jc;
dE[k] = J[i * cols + jE[j]] - Jc;
G2 = (dN[k]*dN[k] + dS[k]*dS[k]
+ dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc);
L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;
num = (0.5*G2) - ((1.0/16.0)*(L*L)) ;
den = 1 + (.25*L);
qsqr = num/(den*den);
				// diffusion coefficient (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c[k] = 1.0 / (1.0+den) ;
				// saturate diffusion coefficient
if (c[k] < 0) {c[k] = 0;}
else if (c[k] > 1) {c[k] = 1;}
}
}
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
// current index
k = i * cols + j;
				// diffusion coefficient
cN = c[k];
cS = c[iS[i] * cols + j];
cW = c[k];
cE = c[i * cols + jE[j]];
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];
// image update (equ 61)
J[k] = J[k] + 0.25*lambda*D;
}
}
#endif // CPU
#ifdef GPU
	//Currently the input size must be divisible by 16 - the block size
int block_x = cols/BLOCK_SIZE ;
int block_y = rows/BLOCK_SIZE ;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(block_x , block_y);
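	// Two-pass update: srad_cuda_1 computes directional derivatives and the diffusion
	// coefficient per pixel, srad_cuda_2 applies the divergence/image update to J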
//Copy data from main memory to device memory
hipMemcpy(J_cuda, J, sizeof(float) * size_I, hipMemcpyHostToDevice);
//Run kernels
hipLaunchKernelGGL(( srad_cuda_1), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr);
hipLaunchKernelGGL(( srad_cuda_2), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr);
//Copy data from device memory to main memory
hipMemcpy(J, J_cuda, sizeof(float) * size_I, hipMemcpyDeviceToHost);
#endif
}
hipDeviceSynchronize();
double end = gettime();
printf("Finished %d iterations and spent %lf seconds.\n", niter, end - start);
#ifdef OUTPUT
//Printing output
printf("Printing Output:\n");
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
printf("%.5f ", J[i * cols + j]);
}
printf("\n");
}
#endif
printf("Computation Done\n");
free(I);
free(J);
#ifdef CPU
free(iN); free(iS); free(jW); free(jE);
free(dN); free(dS); free(dW); free(dE);
#endif
#ifdef GPU
hipFree(C_cuda);
hipFree(J_cuda);
hipFree(E_C);
hipFree(W_C);
hipFree(N_C);
hipFree(S_C);
#endif
free(c);
}
void random_matrix(float *I, int rows, int cols){
srand(7);
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
I[i * cols + j] = rand()/(float)RAND_MAX ;
}
}
}
| 84d1bbe300d3b3bcb15b6c37b54c56d41d1c2c39.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "srad.h"
#include <sys/time.h>
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
// includes, project
#include <cuda.h>
// includes, kernels
#include "srad_kernel.cu"
void random_matrix(float *I, int rows, int cols);
void runTest( int argc, char** argv);
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]);
fprintf(stderr, "\t<rows> - number of rows\n");
fprintf(stderr, "\t<cols> - number of cols\n");
fprintf(stderr, "\t<y1> - y1 value of the speckle\n");
fprintf(stderr, "\t<y2> - y2 value of the speckle\n");
fprintf(stderr, "\t<x1> - x1 value of the speckle\n");
fprintf(stderr, "\t<x2> - x2 value of the speckle\n");
fprintf(stderr, "\t<lamda> - lambda (0,1)\n");
fprintf(stderr, "\t<no. of iter> - number of iterations\n");
fprintf(stderr, "\t<Used Device> - GPU Device ID\n");
exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
runTest( argc, argv);
return EXIT_SUCCESS;
}
void
runTest( int argc, char** argv)
{
int rows, cols, size_I, size_R, niter = 10, iter;
int dev_id = 0;
float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ;
#ifdef CPU
float Jc, G2, L, num, den, qsqr;
int *iN,*iS,*jE,*jW, k;
float *dN,*dS,*dW,*dE;
float cN,cS,cW,cE,D;
#endif
#ifdef GPU
float *J_cuda;
float *C_cuda;
float *E_C, *W_C, *N_C, *S_C;
#endif
unsigned int r1, r2, c1, c2;
float *c;
if (argc == 10)
{
rows = atoi(argv[1]); //number of rows in the domain
cols = atoi(argv[2]); //number of cols in the domain
if ((rows%16!=0) || (cols%16!=0)){
fprintf(stderr, "rows and cols must be multiples of 16\n");
exit(1);
}
r1 = atoi(argv[3]); //y1 position of the speckle
r2 = atoi(argv[4]); //y2 position of the speckle
c1 = atoi(argv[5]); //x1 position of the speckle
c2 = atoi(argv[6]); //x2 position of the speckle
lambda = atof(argv[7]); //Lambda value
niter = atoi(argv[8]); //number of iterations
dev_id = atoi(argv[9]); //Used device
}
else{
usage(argc, argv);
}
cudaDeviceProp prop;
printf("Device ID is %d \n", dev_id);
printf("Choosing CUDA Device...\n");
cudaError_t set_result = cudaSetDevice(dev_id);
printf("Set Result is: %s\n", cudaGetErrorString(set_result));
cudaGetDevice(&dev_id);
cudaGetDeviceProperties(&prop, dev_id);
printf("Name: %s\n", prop.name);
printf("Running %d iterations.\n", niter);
size_I = cols * rows;
size_R = (r2-r1+1)*(c2-c1+1);
I = (float *)malloc( size_I * sizeof(float) );
J = (float *)malloc( size_I * sizeof(float) );
c = (float *)malloc(sizeof(float)* size_I) ;
#ifdef CPU
	iN = (int *)malloc(sizeof(int) * rows) ;
	iS = (int *)malloc(sizeof(int) * rows) ;
	jW = (int *)malloc(sizeof(int) * cols) ;
	jE = (int *)malloc(sizeof(int) * cols) ;
dN = (float *)malloc(sizeof(float)* size_I) ;
dS = (float *)malloc(sizeof(float)* size_I) ;
dW = (float *)malloc(sizeof(float)* size_I) ;
dE = (float *)malloc(sizeof(float)* size_I) ;
for (int i=0; i< rows; i++) {
iN[i] = i-1;
iS[i] = i+1;
}
for (int j=0; j< cols; j++) {
jW[j] = j-1;
jE[j] = j+1;
}
iN[0] = 0;
iS[rows-1] = rows-1;
jW[0] = 0;
jE[cols-1] = cols-1;
#endif
#ifdef GPU
//Allocate device memory
cudaMalloc((void**)& J_cuda, sizeof(float)* size_I);
cudaMalloc((void**)& C_cuda, sizeof(float)* size_I);
cudaMalloc((void**)& E_C, sizeof(float)* size_I);
cudaMalloc((void**)& W_C, sizeof(float)* size_I);
cudaMalloc((void**)& S_C, sizeof(float)* size_I);
cudaMalloc((void**)& N_C, sizeof(float)* size_I);
#endif
printf("Randomizing the input matrix\n");
//Generate a random matrix
random_matrix(I, rows, cols);
for (int k = 0; k < size_I; k++ ) {
J[k] = (float)exp(I[k]) ;
}
printf("Start the SRAD main loop\n");
double start = gettime();
for (iter=0; iter< niter; iter++){
sum=0; sum2=0;
for (int i=r1; i<=r2; i++) {
for (int j=c1; j<=c2; j++) {
tmp = J[i * cols + j];
sum += tmp ;
sum2 += tmp*tmp;
}
}
meanROI = sum / size_R;
varROI = (sum2 / size_R) - meanROI*meanROI;
q0sqr = varROI / (meanROI*meanROI);
#ifdef CPU
for (int i = 0 ; i < rows ; i++) {
for (int j = 0; j < cols; j++) {
k = i * cols + j;
Jc = J[k];
				// directional derivatives
dN[k] = J[iN[i] * cols + j] - Jc;
dS[k] = J[iS[i] * cols + j] - Jc;
dW[k] = J[i * cols + jW[j]] - Jc;
dE[k] = J[i * cols + jE[j]] - Jc;
G2 = (dN[k]*dN[k] + dS[k]*dS[k]
+ dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc);
L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;
num = (0.5*G2) - ((1.0/16.0)*(L*L)) ;
den = 1 + (.25*L);
qsqr = num/(den*den);
				// diffusion coefficient (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c[k] = 1.0 / (1.0+den) ;
				// saturate diffusion coefficient
if (c[k] < 0) {c[k] = 0;}
else if (c[k] > 1) {c[k] = 1;}
}
}
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
// current index
k = i * cols + j;
				// diffusion coefficient
cN = c[k];
cS = c[iS[i] * cols + j];
cW = c[k];
cE = c[i * cols + jE[j]];
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];
// image update (equ 61)
J[k] = J[k] + 0.25*lambda*D;
}
}
#endif // CPU
#ifdef GPU
	//Currently the input size must be divisible by 16 - the block size
int block_x = cols/BLOCK_SIZE ;
int block_y = rows/BLOCK_SIZE ;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(block_x , block_y);
//Copy data from main memory to device memory
cudaMemcpy(J_cuda, J, sizeof(float) * size_I, cudaMemcpyHostToDevice);
//Run kernels
srad_cuda_1<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr);
srad_cuda_2<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr);
//Copy data from device memory to main memory
cudaMemcpy(J, J_cuda, sizeof(float) * size_I, cudaMemcpyDeviceToHost);
#endif
}
cudaThreadSynchronize();
double end = gettime();
printf("Finished %d iterations and spent %lf seconds.\n", niter, end - start);
#ifdef OUTPUT
//Printing output
printf("Printing Output:\n");
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
printf("%.5f ", J[i * cols + j]);
}
printf("\n");
}
#endif
printf("Computation Done\n");
free(I);
free(J);
#ifdef CPU
free(iN); free(iS); free(jW); free(jE);
free(dN); free(dS); free(dW); free(dE);
#endif
#ifdef GPU
cudaFree(C_cuda);
cudaFree(J_cuda);
cudaFree(E_C);
cudaFree(W_C);
cudaFree(N_C);
cudaFree(S_C);
#endif
free(c);
}
void random_matrix(float *I, int rows, int cols){
srand(7);
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
I[i * cols + j] = rand()/(float)RAND_MAX ;
}
}
}
|
d64d1674ac0d57ae59da73c27897f3af8d0008fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d2r-256-5-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
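        // AN5D temporal-blocking parameters (assumed meaning of the generated names):
        // __side0Len = time steps fused per kernel launch, __side1Len/__side2Len = spatial
        // tile sizes, __halo* = stencil radius, and __OlLen* = halo * side0Len of redundant
        // overlap so each tile can advance its fused steps independently.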
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
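        // Leftover time steps: fall back to kernels that fuse fewer steps, chosen so the
        // total launch count keeps the parity the double-buffered result expects.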
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
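    // Reference (non-SCoP) path: plain 25-point weighted box stencil of radius 2
    // over the double-buffered array A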
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| d64d1674ac0d57ae59da73c27897f3af8d0008fc.cu | #include <assert.h>
#include <stdio.h>
#include "box2d2r-256-5-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
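// Leftover timesteps that do not fill a full temporal tile are issued as a single
// launch whose temporal depth (__side0Len) equals the remainder; the spatial tile
// (__side2Len) shrinks accordingly so the halo region stays valid.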
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
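// Host path (taken when the GPU branch above is not used): apply the 25-point (5x5)
// stencil to the interior points for every timestep, with the row loop parallelized
// via OpenMP.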
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
b095a741079c7ef12abc192e213d12e194e11913.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <cudf/table/table_device_view.cuh>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr uint32_t max_dict_entries = default_row_index_stride;
constexpr int init_hash_bits = 12;
struct dictinit_state_s {
uint32_t nnz;
uint32_t total_dupes;
DictionaryChunk chunk;
volatile uint32_t scratch_red[32];
uint32_t dict[max_dict_entries];
union {
uint16_t u16[1 << (init_hash_bits)];
uint32_t u32[1 << (init_hash_bits - 1)];
} map;
};
/**
* @brief Return a 12-bit hash from a string
*/
static inline __device__ uint32_t hash_string(const string_view val)
{
if (val.empty()) {
return 0;
} else {
char const *ptr = val.data();
uint32_t len = val.size_bytes();
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1);
}
}
/**
* @brief Fill dictionary with the indices of non-null rows
*
* @param[in,out] s dictionary builder state
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to scan non-null positions
*/
template <int block_size, typename Storage>
static __device__ void LoadNonNullIndices(volatile dictinit_state_s *s,
int t,
Storage &temp_storage)
{
if (t == 0) { s->nnz = 0; }
for (uint32_t i = 0; i < s->chunk.num_rows; i += block_size) {
const uint32_t *valid_map = s->chunk.leaf_column->null_mask();
auto column_offset = s->chunk.leaf_column->offset();
uint32_t is_valid, nz_pos;
if (t < block_size / 32) {
if (!valid_map) {
s->scratch_red[t] = 0xffffffffu;
} else {
uint32_t const row = s->chunk.start_row + i + t * 32;
auto const chunk_end = s->chunk.start_row + s->chunk.num_rows;
auto const valid_map_idx = (row + column_offset) / 32;
uint32_t valid = (row < chunk_end) ? valid_map[valid_map_idx] : 0;
auto const rows_in_next_word = (row + column_offset) & 0x1f;
if (rows_in_next_word != 0) {
auto const rows_in_current_word = 32 - rows_in_next_word;
// Read next word if any rows are within the chunk
uint32_t const valid_next =
(row + rows_in_current_word < chunk_end) ? valid_map[valid_map_idx + 1] : 0;
valid = __funnelshift_r(valid, valid_next, rows_in_next_word);
}
s->scratch_red[t] = valid;
}
}
__syncthreads();
is_valid = (i + t < s->chunk.num_rows) ? (s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0;
uint32_t tmp_nnz;
hipcub::BlockScan<uint32_t, block_size, hipcub::BLOCK_SCAN_WARP_SCANS>(temp_storage)
.ExclusiveSum(is_valid, nz_pos, tmp_nnz);
nz_pos += s->nnz;
__syncthreads();
if (!t) { s->nnz += tmp_nnz; }
if (is_valid) { s->dict[nz_pos] = i + t; }
__syncthreads();
}
}
/**
* @brief Gather all non-NULL string rows and compute total character data size
*
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of string columns
*/
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size, 2)
gpuInitDictionaryIndices(DictionaryChunk *chunks,
const table_device_view view,
uint32_t *dict_data,
uint32_t *dict_index,
size_t row_index_stride,
size_type *str_col_ids,
uint32_t num_columns)
{
__shared__ __align__(16) dictinit_state_s state_g;
using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
using block_scan = hipcub::BlockScan<uint32_t, block_size, hipcub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
dictinit_state_s *const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
uint32_t nnz, start_row, dict_char_count;
int t = threadIdx.x;
if (t == 0) {
column_device_view *leaf_column_view = view.begin() + str_col_ids[col_id];
s->chunk = chunks[group_id * num_columns + col_id];
s->chunk.leaf_column = leaf_column_view;
s->chunk.dict_data =
dict_data + col_id * leaf_column_view->size() + group_id * row_index_stride;
s->chunk.dict_index = dict_index + col_id * leaf_column_view->size();
s->chunk.start_row = group_id * row_index_stride;
s->chunk.num_rows =
min(row_index_stride,
max(static_cast<size_t>(leaf_column_view->size() - s->chunk.start_row), size_t{0}));
}
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) {
if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0;
}
__syncthreads();
// First, take care of NULLs, and count how many strings we have (TODO: bypass this step when
// there are no nulls)
LoadNonNullIndices<block_size>(s, t, temp_storage.scan_storage);
// Sum the lengths of all the strings
if (t == 0) {
s->chunk.string_char_count = 0;
s->total_dupes = 0;
}
nnz = s->nnz;
dict_data = s->chunk.dict_data;
start_row = s->chunk.start_row;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t len = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
len = static_cast<uint32_t>(string_val.size_bytes());
hash = hash_string(string_val);
}
len = block_reduce(temp_storage.reduce_storage).Sum(len);
if (t == 0) s->chunk.string_char_count += len;
if (i + t < nnz) {
atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 16 : 0));
dict_data[i + t] = start_row + ck_row;
}
__syncthreads();
}
// Reorder the 16-bit local indices according to the hash value of the strings
static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12");
{
// Cumulative sum of hash map counts
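// Each u32 packs two adjacent 16-bit bucket counts; multiplying a value by 0x10001
// broadcasts it into both halves, so the eight buckets owned by this thread are
// prefix-summed with packed adds before the block-wide scan below.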
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
sum23 += (sum01 >> 16) * 0x10001;
sum45 += (sum23 >> 16) * 0x10001;
sum67 += (sum45 >> 16) * 0x10001;
uint32_t sum_w = sum67 >> 16;
block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w);
__syncthreads();
sum_w = (sum_w - (sum67 >> 16)) * 0x10001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
__syncthreads();
}
// Put the indices back in hash order
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, pos = 0, hash = 0, pos_old, pos_new, sh, colliding_row;
bool collision;
if (i + t < nnz) {
ck_row = dict_data[i + t] - start_row;
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
hash = hash_string(string_val);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic
// behavior for the first row in the hash map that will be used for early duplicate detection
__syncthreads();
if (i + t < nnz) {
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = ck_row;
}
__syncthreads();
collision = false;
if (i + t < nnz) {
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision) { colliding_row = s->dict[pos_old]; }
}
__syncthreads();
if (collision) { atomicMin(s->dict + pos_old, ck_row); }
__syncthreads();
// Resolve collision
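// The atomicMin above leaves the smallest colliding row index in the bucket's first
// slot (used later as the dedup reference); the winning thread re-inserts the row it
// displaced into its own position so no entry is lost.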
if (collision && ck_row == s->dict[pos_old]) { s->dict[pos] = colliding_row; }
}
__syncthreads();
// Now that the strings are ordered by hash, compare every string with the first entry in the hash
// map; the position of the first string can be inferred from the hash map counts
dict_char_count = 0;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_value = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
auto const string_length = static_cast<uint32_t>(string_value.size_bytes());
auto const hash = hash_string(string_value);
ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0];
if (ck_row_ref != ck_row) {
string_view reference_string =
s->chunk.leaf_column->element<string_view>(ck_row_ref + start_row);
is_dupe = (string_value == reference_string);
dict_char_count += (is_dupe) ? 0 : string_length;
}
}
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < nnz) {
if (!is_dupe) {
dict_data[i + t - dupes_before] = ck_row + start_row;
} else {
s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31);
}
}
}
// temp_storage is being used twice, so make sure there is `__syncthreads()` between them
// while making any future changes.
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (!t) {
chunks[group_id * num_columns + col_id].num_strings = nnz;
chunks[group_id * num_columns + col_id].string_char_count = s->chunk.string_char_count;
chunks[group_id * num_columns + col_id].num_dict_strings = nnz - s->total_dupes;
chunks[group_id * num_columns + col_id].dict_char_count = dict_char_count;
chunks[group_id * num_columns + col_id].leaf_column = s->chunk.leaf_column;
chunks[group_id * num_columns + col_id].dict_data = s->chunk.dict_data;
chunks[group_id * num_columns + col_id].dict_index = s->chunk.dict_index;
chunks[group_id * num_columns + col_id].start_row = s->chunk.start_row;
chunks[group_id * num_columns + col_id].num_rows = s->chunk.num_rows;
}
}
/**
* @brief In-place concatenate dictionary data for all chunks in each stripe
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*/
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuCompactChunkDictionaries(StripeDictionary *stripes,
DictionaryChunk const *chunks,
uint32_t num_columns)
{
__shared__ __align__(16) StripeDictionary stripe_g;
__shared__ __align__(16) DictionaryChunk chunk_g;
__shared__ const uint32_t *volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t chunk_len;
int t = threadIdx.x;
const uint32_t *src;
uint32_t *dst;
if (t == 0) stripe_g = stripes[stripe_id * num_columns + col_id];
__syncthreads();
if (!stripe_g.dict_data) { return; }
if (t == 0) chunk_g = chunks[stripe_g.start_chunk * num_columns + col_id];
__syncthreads();
dst = stripe_g.dict_data + chunk_g.num_dict_strings;
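// The first chunk's dictionary entries are already in place; append the entries of
// the remaining chunks directly after them.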
for (uint32_t g = 1; g < stripe_g.num_chunks; g++) {
if (!t) {
src = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].dict_data;
chunk_len = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].num_dict_strings;
ck_curptr_g = src;
ck_curlen_g = chunk_len;
}
__syncthreads();
src = ck_curptr_g;
chunk_len = ck_curlen_g;
if (src != dst) {
for (uint32_t i = 0; i < chunk_len; i += 1024) {
uint32_t idx = (i + t < chunk_len) ? src[i + t] : 0;
__syncthreads();
if (i + t < chunk_len) dst[i + t] = idx;
}
}
dst += chunk_len;
__syncthreads();
}
}
struct build_state_s {
uint32_t total_dupes;
StripeDictionary stripe;
volatile uint32_t scratch_red[32];
};
/**
* @brief Eliminate duplicates in-place and generate column dictionary index
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] num_columns Number of string columns
*/
// NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary
// blockDim {1024,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuBuildStripeDictionaries(StripeDictionary *stripes, uint32_t num_columns)
{
__shared__ __align__(16) build_state_s state_g;
using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
using block_scan = hipcub::BlockScan<uint32_t, block_size, hipcub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
build_state_s *const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t num_strings;
uint32_t *dict_data, *dict_index;
uint32_t dict_char_count;
int t = threadIdx.x;
if (t == 0) s->stripe = stripes[stripe_id * num_columns + col_id];
if (t == 31 * 32) { s->total_dupes = 0; }
__syncthreads();
num_strings = s->stripe.num_strings;
dict_data = s->stripe.dict_data;
if (!dict_data) return;
dict_index = s->stripe.dict_index;
string_view current_string = string_view::min();
dict_char_count = 0;
for (uint32_t i = 0; i < num_strings; i += block_size) {
uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0;
uint32_t cur_len = 0;
bool is_dupe = false;
if (i + t < num_strings) {
current_string = s->stripe.leaf_column->element<string_view>(cur);
cur_len = current_string.size_bytes();
}
if (i + t != 0 && i + t < num_strings) {
uint32_t prev = dict_data[i + t - 1];
is_dupe = (current_string == (s->stripe.leaf_column->element<string_view>(prev)));
}
dict_char_count += (is_dupe) ? 0 : cur_len;
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < num_strings) {
dict_index[cur] = i + t - dupes_before;
if (!is_dupe && dupes_before != 0) { dict_data[i + t - dupes_before] = cur; }
}
__syncthreads();
}
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (t == 0) {
stripes[stripe_id * num_columns + col_id].num_strings = num_strings - s->total_dupes;
stripes[stripe_id * num_columns + col_id].dict_char_count = dict_char_count;
}
}
/**
* @copydoc cudf::io::orc::gpu::InitDictionaryIndices
*/
void InitDictionaryIndices(const table_device_view &view,
DictionaryChunk *chunks,
uint32_t *dict_data,
uint32_t *dict_index,
size_t row_index_stride,
size_type *str_col_ids,
uint32_t num_columns,
uint32_t num_rowgroups,
rmm::cuda_stream_view stream)
{
static constexpr int block_size = 512;
dim3 dim_block(block_size, 1);
dim3 dim_grid(num_columns, num_rowgroups);
hipLaunchKernelGGL(( gpuInitDictionaryIndices<block_size>), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
chunks, view, dict_data, dict_index, row_index_stride, str_col_ids, num_columns);
}
/**
* @copydoc cudf::io::orc::gpu::BuildStripeDictionaries
*/
void BuildStripeDictionaries(StripeDictionary *stripes,
StripeDictionary *stripes_host,
DictionaryChunk const *chunks,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t num_columns,
rmm::cuda_stream_view stream)
{
dim3 dim_block(1024, 1); // 1024 threads per chunk
dim3 dim_grid_build(num_columns, num_stripes);
hipLaunchKernelGGL(( gpuCompactChunkDictionaries), dim3(dim_grid_build), dim3(dim_block), 0, stream.value(),
stripes, chunks, num_columns);
for (uint32_t i = 0; i < num_stripes * num_columns; i++) {
if (stripes_host[i].dict_data != nullptr) {
thrust::device_ptr<uint32_t> dict_data_ptr =
thrust::device_pointer_cast(stripes_host[i].dict_data);
column_device_view *string_column = stripes_host[i].leaf_column;
// NOTE: Requires the --expt-extended-lambda nvcc flag
thrust::sort(rmm::exec_policy(stream),
dict_data_ptr,
dict_data_ptr + stripes_host[i].num_strings,
[string_column] __device__(const uint32_t &lhs, const uint32_t &rhs) {
return string_column->element<string_view>(lhs) <
string_column->element<string_view>(rhs);
});
}
}
hipLaunchKernelGGL(( gpuBuildStripeDictionaries<1024>)
, dim3(dim_grid_build), dim3(dim_block), 0, stream.value(), stripes, num_columns);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
| b095a741079c7ef12abc192e213d12e194e11913.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <cudf/table/table_device_view.cuh>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr uint32_t max_dict_entries = default_row_index_stride;
constexpr int init_hash_bits = 12;
struct dictinit_state_s {
uint32_t nnz;
uint32_t total_dupes;
DictionaryChunk chunk;
volatile uint32_t scratch_red[32];
uint32_t dict[max_dict_entries];
union {
uint16_t u16[1 << (init_hash_bits)];
uint32_t u32[1 << (init_hash_bits - 1)];
} map;
};
/**
* @brief Return a 12-bit hash from a string
*/
static inline __device__ uint32_t hash_string(const string_view val)
{
if (val.empty()) {
return 0;
} else {
char const *ptr = val.data();
uint32_t len = val.size_bytes();
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1);
}
}
/**
* @brief Fill dictionary with the indices of non-null rows
*
* @param[in,out] s dictionary builder state
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to scan non-null positions
*/
template <int block_size, typename Storage>
static __device__ void LoadNonNullIndices(volatile dictinit_state_s *s,
int t,
Storage &temp_storage)
{
if (t == 0) { s->nnz = 0; }
for (uint32_t i = 0; i < s->chunk.num_rows; i += block_size) {
const uint32_t *valid_map = s->chunk.leaf_column->null_mask();
auto column_offset = s->chunk.leaf_column->offset();
uint32_t is_valid, nz_pos;
if (t < block_size / 32) {
if (!valid_map) {
s->scratch_red[t] = 0xffffffffu;
} else {
uint32_t const row = s->chunk.start_row + i + t * 32;
auto const chunk_end = s->chunk.start_row + s->chunk.num_rows;
auto const valid_map_idx = (row + column_offset) / 32;
uint32_t valid = (row < chunk_end) ? valid_map[valid_map_idx] : 0;
auto const rows_in_next_word = (row + column_offset) & 0x1f;
if (rows_in_next_word != 0) {
auto const rows_in_current_word = 32 - rows_in_next_word;
// Read next word if any rows are within the chunk
uint32_t const valid_next =
(row + rows_in_current_word < chunk_end) ? valid_map[valid_map_idx + 1] : 0;
valid = __funnelshift_r(valid, valid_next, rows_in_next_word);
}
s->scratch_red[t] = valid;
}
}
__syncthreads();
is_valid = (i + t < s->chunk.num_rows) ? (s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0;
uint32_t tmp_nnz;
cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>(temp_storage)
.ExclusiveSum(is_valid, nz_pos, tmp_nnz);
nz_pos += s->nnz;
__syncthreads();
if (!t) { s->nnz += tmp_nnz; }
if (is_valid) { s->dict[nz_pos] = i + t; }
__syncthreads();
}
}
/**
* @brief Gather all non-NULL string rows and compute total character data size
*
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of string columns
*/
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size, 2)
gpuInitDictionaryIndices(DictionaryChunk *chunks,
const table_device_view view,
uint32_t *dict_data,
uint32_t *dict_index,
size_t row_index_stride,
size_type *str_col_ids,
uint32_t num_columns)
{
__shared__ __align__(16) dictinit_state_s state_g;
using block_reduce = cub::BlockReduce<uint32_t, block_size>;
using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
dictinit_state_s *const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
uint32_t nnz, start_row, dict_char_count;
int t = threadIdx.x;
if (t == 0) {
column_device_view *leaf_column_view = view.begin() + str_col_ids[col_id];
s->chunk = chunks[group_id * num_columns + col_id];
s->chunk.leaf_column = leaf_column_view;
s->chunk.dict_data =
dict_data + col_id * leaf_column_view->size() + group_id * row_index_stride;
s->chunk.dict_index = dict_index + col_id * leaf_column_view->size();
s->chunk.start_row = group_id * row_index_stride;
s->chunk.num_rows =
min(row_index_stride,
max(static_cast<size_t>(leaf_column_view->size() - s->chunk.start_row), size_t{0}));
}
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) {
if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0;
}
__syncthreads();
// First, take care of NULLs, and count how many strings we have (TODO: bypass this step when
// there are no nulls)
LoadNonNullIndices<block_size>(s, t, temp_storage.scan_storage);
// Sum the lengths of all the strings
if (t == 0) {
s->chunk.string_char_count = 0;
s->total_dupes = 0;
}
nnz = s->nnz;
dict_data = s->chunk.dict_data;
start_row = s->chunk.start_row;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t len = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
len = static_cast<uint32_t>(string_val.size_bytes());
hash = hash_string(string_val);
}
len = block_reduce(temp_storage.reduce_storage).Sum(len);
if (t == 0) s->chunk.string_char_count += len;
if (i + t < nnz) {
atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 16 : 0));
dict_data[i + t] = start_row + ck_row;
}
__syncthreads();
}
// Reorder the 16-bit local indices according to the hash value of the strings
static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12");
{
// Cumulative sum of hash map counts
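// Each u32 packs two adjacent 16-bit bucket counts; multiplying a value by 0x10001
// broadcasts it into both halves, so the eight buckets owned by this thread are
// prefix-summed with packed adds before the block-wide scan below.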
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
sum23 += (sum01 >> 16) * 0x10001;
sum45 += (sum23 >> 16) * 0x10001;
sum67 += (sum45 >> 16) * 0x10001;
uint32_t sum_w = sum67 >> 16;
block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w);
__syncthreads();
sum_w = (sum_w - (sum67 >> 16)) * 0x10001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
__syncthreads();
}
// Put the indices back in hash order
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, pos = 0, hash = 0, pos_old, pos_new, sh, colliding_row;
bool collision;
if (i + t < nnz) {
ck_row = dict_data[i + t] - start_row;
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
hash = hash_string(string_val);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic
// behavior for the first row in the hash map that will be used for early duplicate detection
__syncthreads();
if (i + t < nnz) {
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = ck_row;
}
__syncthreads();
collision = false;
if (i + t < nnz) {
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision) { colliding_row = s->dict[pos_old]; }
}
__syncthreads();
if (collision) { atomicMin(s->dict + pos_old, ck_row); }
__syncthreads();
// Resolve collision
if (collision && ck_row == s->dict[pos_old]) { s->dict[pos] = colliding_row; }
}
__syncthreads();
// Now that the strings are ordered by hash, compare every string with the first entry in the hash
// map; the position of the first string can be inferred from the hash map counts
dict_char_count = 0;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_value = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
auto const string_length = static_cast<uint32_t>(string_value.size_bytes());
auto const hash = hash_string(string_value);
ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0];
if (ck_row_ref != ck_row) {
string_view reference_string =
s->chunk.leaf_column->element<string_view>(ck_row_ref + start_row);
is_dupe = (string_value == reference_string);
dict_char_count += (is_dupe) ? 0 : string_length;
}
}
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < nnz) {
if (!is_dupe) {
dict_data[i + t - dupes_before] = ck_row + start_row;
} else {
s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31);
}
}
}
// temp_storage is being used twice, so make sure there is `__syncthreads()` between them
// while making any future changes.
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (!t) {
chunks[group_id * num_columns + col_id].num_strings = nnz;
chunks[group_id * num_columns + col_id].string_char_count = s->chunk.string_char_count;
chunks[group_id * num_columns + col_id].num_dict_strings = nnz - s->total_dupes;
chunks[group_id * num_columns + col_id].dict_char_count = dict_char_count;
chunks[group_id * num_columns + col_id].leaf_column = s->chunk.leaf_column;
chunks[group_id * num_columns + col_id].dict_data = s->chunk.dict_data;
chunks[group_id * num_columns + col_id].dict_index = s->chunk.dict_index;
chunks[group_id * num_columns + col_id].start_row = s->chunk.start_row;
chunks[group_id * num_columns + col_id].num_rows = s->chunk.num_rows;
}
}
/**
* @brief In-place concatenate dictionary data for all chunks in each stripe
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*/
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuCompactChunkDictionaries(StripeDictionary *stripes,
DictionaryChunk const *chunks,
uint32_t num_columns)
{
__shared__ __align__(16) StripeDictionary stripe_g;
__shared__ __align__(16) DictionaryChunk chunk_g;
__shared__ const uint32_t *volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t chunk_len;
int t = threadIdx.x;
const uint32_t *src;
uint32_t *dst;
if (t == 0) stripe_g = stripes[stripe_id * num_columns + col_id];
__syncthreads();
if (!stripe_g.dict_data) { return; }
if (t == 0) chunk_g = chunks[stripe_g.start_chunk * num_columns + col_id];
__syncthreads();
dst = stripe_g.dict_data + chunk_g.num_dict_strings;
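// The first chunk's dictionary entries are already in place; append the entries of
// the remaining chunks directly after them.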
for (uint32_t g = 1; g < stripe_g.num_chunks; g++) {
if (!t) {
src = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].dict_data;
chunk_len = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].num_dict_strings;
ck_curptr_g = src;
ck_curlen_g = chunk_len;
}
__syncthreads();
src = ck_curptr_g;
chunk_len = ck_curlen_g;
if (src != dst) {
for (uint32_t i = 0; i < chunk_len; i += 1024) {
uint32_t idx = (i + t < chunk_len) ? src[i + t] : 0;
__syncthreads();
if (i + t < chunk_len) dst[i + t] = idx;
}
}
dst += chunk_len;
__syncthreads();
}
}
struct build_state_s {
uint32_t total_dupes;
StripeDictionary stripe;
volatile uint32_t scratch_red[32];
};
/**
* @brief Eliminate duplicates in-place and generate column dictionary index
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] num_columns Number of string columns
*/
// NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary
// blockDim {1024,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuBuildStripeDictionaries(StripeDictionary *stripes, uint32_t num_columns)
{
__shared__ __align__(16) build_state_s state_g;
using block_reduce = cub::BlockReduce<uint32_t, block_size>;
using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
build_state_s *const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t num_strings;
uint32_t *dict_data, *dict_index;
uint32_t dict_char_count;
int t = threadIdx.x;
if (t == 0) s->stripe = stripes[stripe_id * num_columns + col_id];
if (t == 31 * 32) { s->total_dupes = 0; }
__syncthreads();
num_strings = s->stripe.num_strings;
dict_data = s->stripe.dict_data;
if (!dict_data) return;
dict_index = s->stripe.dict_index;
string_view current_string = string_view::min();
dict_char_count = 0;
for (uint32_t i = 0; i < num_strings; i += block_size) {
uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0;
uint32_t cur_len = 0;
bool is_dupe = false;
if (i + t < num_strings) {
current_string = s->stripe.leaf_column->element<string_view>(cur);
cur_len = current_string.size_bytes();
}
if (i + t != 0 && i + t < num_strings) {
uint32_t prev = dict_data[i + t - 1];
is_dupe = (current_string == (s->stripe.leaf_column->element<string_view>(prev)));
}
dict_char_count += (is_dupe) ? 0 : cur_len;
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < num_strings) {
dict_index[cur] = i + t - dupes_before;
if (!is_dupe && dupes_before != 0) { dict_data[i + t - dupes_before] = cur; }
}
__syncthreads();
}
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (t == 0) {
stripes[stripe_id * num_columns + col_id].num_strings = num_strings - s->total_dupes;
stripes[stripe_id * num_columns + col_id].dict_char_count = dict_char_count;
}
}
/**
* @copydoc cudf::io::orc::gpu::InitDictionaryIndices
*/
void InitDictionaryIndices(const table_device_view &view,
DictionaryChunk *chunks,
uint32_t *dict_data,
uint32_t *dict_index,
size_t row_index_stride,
size_type *str_col_ids,
uint32_t num_columns,
uint32_t num_rowgroups,
rmm::cuda_stream_view stream)
{
static constexpr int block_size = 512;
dim3 dim_block(block_size, 1);
dim3 dim_grid(num_columns, num_rowgroups);
gpuInitDictionaryIndices<block_size><<<dim_grid, dim_block, 0, stream.value()>>>(
chunks, view, dict_data, dict_index, row_index_stride, str_col_ids, num_columns);
}
/**
* @copydoc cudf::io::orc::gpu::BuildStripeDictionaries
*/
void BuildStripeDictionaries(StripeDictionary *stripes,
StripeDictionary *stripes_host,
DictionaryChunk const *chunks,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t num_columns,
rmm::cuda_stream_view stream)
{
dim3 dim_block(1024, 1); // 1024 threads per chunk
dim3 dim_grid_build(num_columns, num_stripes);
gpuCompactChunkDictionaries<<<dim_grid_build, dim_block, 0, stream.value()>>>(
stripes, chunks, num_columns);
for (uint32_t i = 0; i < num_stripes * num_columns; i++) {
if (stripes_host[i].dict_data != nullptr) {
thrust::device_ptr<uint32_t> dict_data_ptr =
thrust::device_pointer_cast(stripes_host[i].dict_data);
column_device_view *string_column = stripes_host[i].leaf_column;
// NOTE: Requires the --expt-extended-lambda nvcc flag
thrust::sort(rmm::exec_policy(stream),
dict_data_ptr,
dict_data_ptr + stripes_host[i].num_strings,
[string_column] __device__(const uint32_t &lhs, const uint32_t &rhs) {
return string_column->element<string_view>(lhs) <
string_column->element<string_view>(rhs);
});
}
}
gpuBuildStripeDictionaries<1024>
<<<dim_grid_build, dim_block, 0, stream.value()>>>(stripes, num_columns);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
ed3cc9409efb744152f3e5d26f16ad100f91862b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include <thrust/sort.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
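// Hash the iteration number, pixel index, and bounce depth together so every bounce
// of every pixel in every iteration draws from an independent random sequence.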
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);
/*printf("%d\n", iter);*/
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
thrust::device_ptr<PathSegment> dev_thrust_paths;
thrust::device_ptr<ShadeableIntersection> dev_thrust_intersections;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
dev_thrust_paths = thrust::device_pointer_cast(dev_paths);
dev_thrust_intersections = thrust::device_pointer_cast(dev_intersections);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// TODO: clean up any extra device memory you created
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].point = intersect_point;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
//float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
//pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
//pathSegments[idx].color *= u01(rng); // apply some noise because why not
scatterRay(pathSegments[idx], intersection.point, intersection.surfaceNormal, material, rng);
if (--pathSegments[idx].remainingBounces == 0)
{
pathSegments[idx].color = glm::vec3(0.0f);
}
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
struct isActive
{
__device__
bool operator()(const PathSegment &path)
{
return path.remainingBounces > 0;
}
};
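// Ordering ShadeableIntersections by materialId lets thrust::sort_by_key group path
// segments that hit the same material, reducing divergence in the shading kernel.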
__host__ __device__ bool operator<(const ShadeableIntersection &lhs, const ShadeableIntersection &rhs)
{
return lhs.materialId < rhs.materialId;
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter, bool contiguousMaterial, bool cacheFirstIntersections) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
//printf("%d\n", iter);
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
if (contiguousMaterial)
{
thrust::sort_by_key(dev_thrust_intersections, dev_thrust_intersections + pixelcount, dev_thrust_paths);
}
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shadeFakeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials,
depth
);
// TODO: Stream compaction
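// thrust::partition moves the paths that still have bounces left to the front of
// dev_paths and returns the partition point, so later kernels only launch threads
// for the surviving paths.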
num_paths = thrust::partition(dev_thrust_paths, dev_thrust_paths + pixelcount, isActive()) - dev_thrust_paths;
if (num_paths == 0)
//if (depth == traceDepth)
{
num_paths = pixelcount;
iterationComplete = true; // TODO: should be based off stream compaction results.
}
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> > (num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| ed3cc9409efb744152f3e5d26f16ad100f91862b.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include <thrust/sort.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
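// Hash the iteration number, pixel index, and bounce depth together so every bounce
// of every pixel in every iteration draws from an independent random sequence.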
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);
/*printf("%d\n", iter);*/
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
thrust::device_ptr<PathSegment> dev_thrust_paths;
thrust::device_ptr<ShadeableIntersection> dev_thrust_intersections;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memeory you need
dev_thrust_paths = thrust::device_pointer_cast(dev_paths);
dev_thrust_intersections = thrust::device_pointer_cast(dev_intersections);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// TODO: clean up any extra device memory you created
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].point = intersect_point;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int depth
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
//float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
//pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
//pathSegments[idx].color *= u01(rng); // apply some noise because why not
scatterRay(pathSegments[idx], intersection.point, intersection.surfaceNormal, material, rng);
if (--pathSegments[idx].remainingBounces == 0)
{
pathSegments[idx].color = glm::vec3(0.0f);
}
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
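// Predicate used with thrust::partition below: returns true for paths that
// still have bounces left, so live rays are packed to the front of dev_paths
// and later kernel launches only cover active work.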
struct isActive
{
__device__
bool operator()(const PathSegment &path)
{
return path.remainingBounces > 0;
}
};
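// Ordering used by thrust::sort_by_key when contiguousMaterial is enabled:
// grouping intersections by materialId makes path segments that hit the same
// material contiguous in memory before shading.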
__host__ __device__ bool operator<(const ShadeableIntersection &lhs, const ShadeableIntersection &rhs)
{
return lhs.materialId < rhs.materialId;
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter, bool contiguousMaterial, bool cacheFirstIntersections) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
//printf("%d\n", iter);
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
if (contiguousMaterial)
{
thrust::sort_by_key(dev_thrust_intersections, dev_thrust_intersections + pixelcount, dev_thrust_paths);
}
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shadeFakeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials,
depth
);
// TODO: Stream compaction
num_paths = thrust::partition(dev_thrust_paths, dev_thrust_paths + pixelcount, isActive()) - dev_thrust_paths;
if (num_paths == 0)
//if (depth == traceDepth)
{
num_paths = pixelcount;
iterationComplete = true; // TODO: should be based off stream compaction results.
}
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> > (num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
24d2728c8a6767337b0ec388e6f5dc34a16d776d.hip | // !!! This is a file automatically generated by hipify!!!
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
| 24d2728c8a6767337b0ec388e6f5dc34a16d776d.cu | // generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
5e794b1a3d8b2ee0752b64c3056f36d412d8605d.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef _WIN64
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#endif
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <algorithm>
#include "thrust/sort.h"
#include "thrust/device_ptr.h"
#include "thrust/host_vector.h"
#include <thrust/execution_policy.h>
#include "device_vector2.h"
#include "math.h"
#include <vector>
#include <hip/hip_runtime_api.h>
typedef enum {
MODE_PRINT,
MODE_PERF
} simulation_mode_t;
struct particle_t {
double i;
vector2 position;
vector2 velocity;
int p_collisions;
int w_collisions;
};
__constant__ int l, r, s;
__constant__ int n;
int host_n;
struct Collision {
__host__ __device__ Collision() {}
__device__ Collision(int i, int j, double stepValue) : i(i), j(j), stepValue(stepValue) {}
int i;
int j;
double stepValue;
};
__host__ __device__ bool operator<(const Collision& lhs, const Collision& rhs) {
return lhs.stepValue < rhs.stepValue;
}
__managed__ particle_t* particles;
__managed__ int* numCollisions;
__managed__ Collision* collisions;
__managed__ bool* resolved;
__managed__ Collision* validCollisions;
__device__ bool isStepValid(double step) {
return 0 <= step && step < 1;
}
// 2 is returned as "infinity"
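// Swept-circle test in b's frame of reference: a moves by the relative
// velocity (a.velocity - b.velocity) during the step. Project the vector to
// b onto that direction, find the closest approach, and solve for the travel
// distance at which the centre separation equals the sum of the radii. The
// returned value is the fraction of the time step at which contact happens.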
__device__ double detectParticleCollision_cuda(particle_t a, particle_t b) {
double distance = dist(b.position, a.position);
double sumRadii = r + r;
distance -= sumRadii;
vector2 resultVector = a.velocity - b.velocity;
double resultMag = magnitude(resultVector);
if (resultMag < distance) return 2;
vector2 unitResultVector = resultVector;
unitResultVector.normalize();
vector2 c = b.position - a.position;
double d = unitResultVector * c;
if (d <= 0) return 2;
double lengthC = magnitude(c);
double fSquared = lengthC * lengthC - d * d;
double sumRadiiSquared = sumRadii * sumRadii;
// Escape: closest that a will get to b.
if (fSquared >= sumRadiiSquared) return 2;
double tSquared = sumRadiiSquared - fSquared;
// negative tSquared. Probably don't have to do this check because the one preceding
// this one already ensures that tSquared isn't negative.
if (tSquared < 0) return 2;
double distanceToCollide = d - std::sqrt(tSquared);
// Ensure that distance A has to move to touch B
// is not greater than the magnitude of the movement vector
if (resultMag < distanceToCollide) return 2;
// the final displacement that the particle would have just before the collision.
// can also choose to return this in a result vector;
vector2 finalVector = unitResultVector * distanceToCollide;
return magnitude(finalVector) / resultMag;
}
__device__ void checkParticleCollisions(int i, const int max_collisions) {
// i: particle index
const particle_t& current = particles[i];
for (int j = 0; j < i + 1; j++) {
collisions[i * max_collisions + j] = Collision(i, j, 2.0);
}
for (int j = i + 1; j < max_collisions - 1; j++) {
const particle_t& target = particles[j];
double step = detectParticleCollision_cuda(current, target);
collisions[i * max_collisions + j] = Collision(i, j, step);
}
}
__device__ Collision detectWallCollision_cuda(const particle_t& p) {
vector2 end_pos = p.position + p.velocity;
Collision result(0, 0, 2.0); // stepValue > 1 means no collision
// TODO: reduce branching
if (end_pos.x - r <= 0) { // left, -1
Collision temp = Collision(p.i, -1, (r - p.position.x) / p.velocity.x);
if (temp < result) result = temp;
}
if (end_pos.x + r >= l) { // right, -2
Collision temp = Collision(p.i, -2, (l - r - p.position.x) / p.velocity.x);
if (temp < result) result = temp;
}
if (end_pos.y - r <= 0) { // bottom, -3
Collision temp = Collision(p.i, -3, (r - p.position.y) / p.velocity.y);
if (temp < result) result = temp;
}
if (end_pos.y + r >= l) { // top, -4
Collision temp = Collision(p.i, -4, (l - r - p.position.y) / p.velocity.y);
if (temp < result) result = temp;
}
return result;
}
__device__ void checkWallCollisions(int i, const int max_collisions) {
collisions[i * max_collisions + max_collisions-1] = detectWallCollision_cuda(particles[i]);
}
__host__ double fRand(double fMin, double fMax) {
return fMin + ((double)rand() / RAND_MAX) * (fMax - fMin);
}
__global__ void runCollisionChecks(int numParticles, int threadsTotal, int chunkNo) {
int i = chunkNo * threadsTotal + blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numParticles) return;
checkParticleCollisions(i, numParticles + 1);
checkWallCollisions(i, numParticles + 1);
}
__device__ double clamp(double d, double min, double max) {
const double t = d < min ? min : d;
return t > max ? max : t;
}
// keep a particle within bounds
__device__ void clampParticleBounds(particle_t& p) {
double x = p.position.x;
double y = p.position.y;
p.position.x = clamp(x, r, s - r);
p.position.y = clamp(y, r, s - r);
}
__device__ void resolveWallCollision(particle_t& p, int wall, double stepProportion) {
switch (wall) {
case -1:
p.position += p.velocity * stepProportion;
p.velocity.x *= -1;
break;
case -2:
p.position += p.velocity * stepProportion;
p.velocity.x *= -1;
break;
case -3:
p.position += p.velocity * stepProportion;
p.velocity.y *= -1;
break;
case -4:
p.position += p.velocity * stepProportion;
p.velocity.y *= -1;
}
p.position += p.velocity * (1 - stepProportion);
}
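// Equal-mass, perfectly elastic response: advance both particles to the
// moment of impact, exchange momentum along the collision normal n
// (impulse p = (v_a - v_b) . n for unit masses), then finish the remainder
// of the time step with the updated velocities.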
__device__ void resolveParticleCollision(particle_t& a, particle_t& b, double stepProportion) {
vector2 aImpact = a.position + a.velocity * stepProportion;
vector2 bImpact = b.position + b.velocity * stepProportion;
double d = dist(aImpact, bImpact);
vector2 n = vector2((bImpact.x - aImpact.x) / d, (bImpact.y - aImpact.y) / d);
double p = 2 * (a.velocity * n - b.velocity * n) / 2;
a.velocity = a.velocity - n * p * 1;
b.velocity = b.velocity + n * p * 1;
a.position = aImpact + a.velocity * (1.0f - stepProportion);
b.position = bImpact + b.velocity * (1.0f - stepProportion);
}
__global__ void resolveCollisions(int size, int threadsTotal, int chunkNo) {
int i = chunkNo * threadsTotal + blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) return;
Collision res = validCollisions[i];
if (res.j < 0) {
resolveWallCollision(particles[res.i], res.j, res.stepValue);
clampParticleBounds(particles[res.i]);
particles[res.i].w_collisions++;
} else {
resolveParticleCollision(particles[res.i], particles[res.j], res.stepValue);
clampParticleBounds(particles[res.i]);
clampParticleBounds(particles[res.j]);
particles[res.i].p_collisions++;
particles[res.j].p_collisions++;
}
}
__global__ void moveUnresolvedParticles(bool* resolved, int numParticles, int threadsTotal, int chunkNo) {
int i = chunkNo * threadsTotal + blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numParticles) return;
if (!resolved[i]) {
particles[i].position += particles[i].velocity;
}
}
__host__ void print_particles(int step) {
for (int i = 0; i < host_n; i++) {
printf("%d %d %10.8f %10.8f %10.8f %10.8f\n", step, i, particles[i].position.x, particles[i].position.y,
particles[i].velocity.x, particles[i].velocity.y);
}
}
__host__ void print_statistics(int num_step) {
for (int i = 0; i < host_n; i++) {
printf("%d %d %10.8f %10.8f %10.8f %10.8f %d %d\n", num_step, i, particles[i].position.x,
particles[i].position.y, particles[i].velocity.x, particles[i].velocity.y,
particles[i].p_collisions, particles[i].w_collisions);
}
}
int main(int argc, char** argv)
{
int i;
double x, y, vx, vy;
int num_blocks, num_threads;
int step;
int host_l, host_r, host_s;
simulation_mode_t mode;
char mode_buf[6];
if (argc != 3) {
printf("Usage:\n%s num_blocks numParticles\n", argv[0]);
return 1;
}
num_blocks = atoi(argv[1]);
num_threads = atoi(argv[2]);
scanf("%d", &host_n);
scanf("%d", &host_l);
scanf("%d", &host_r);
scanf("%d", &host_s);
scanf("%5s", mode_buf);
hipMallocManaged(&numCollisions, sizeof(int) * host_n);
hipMallocManaged(&particles, sizeof(particle_t) * host_n);
hipMallocManaged(&collisions, sizeof(Collision) * host_n * (host_n+1)); // [particle][collided object (host_n=wall)]
hipMallocManaged(&resolved, sizeof(bool) * host_n);
hipMallocManaged(&validCollisions, sizeof(Collision) * host_n);
for (i = 0; i < host_n; i++) {
particles[i].i = -1;
particles[i].p_collisions = 0;
particles[i].w_collisions = 0;
}
while (scanf("%d %lf %lf %lf %lf", &i, &x, &y, &vx, &vy) != EOF) {
particles[i].i = i;
particles[i].position = vector2(x, y);
particles[i].velocity = vector2(vx, vy);
}
if (particles[0].i == -1) {
srand(time(NULL));
double minVelocity = 1.0 / 4;
double maxVelocity = 1.0 / (8.0 * host_r);
for (int i = 0; i < host_n; i++) {
int sign1 = (rand() % 2) ? 1 : -1;
int sign2 = (rand() % 2) ? 1 : -1;
particles[i].position = vector2(fRand(host_r, 1 - host_r), fRand(host_r, 1 - host_r));
particles[i].velocity = vector2(sign1 * fRand(minVelocity, maxVelocity), sign2 * fRand(minVelocity, maxVelocity));
}
}
mode = strcmp(mode_buf, "print") == 0 ? MODE_PRINT : MODE_PERF;
/* Copy to GPU constant memory */
hipMemcpyToSymbol(n, &host_n, sizeof(n));
hipMemcpyToSymbol(l, &host_l, sizeof(l));
hipMemcpyToSymbol(r, &host_r, sizeof(r));
hipMemcpyToSymbol(s, &host_s, sizeof(s));
int threadsTotal = num_blocks * num_threads;
hipProfilerStart();
for (step = 0; step < host_s; step++) {
if (mode == MODE_PRINT || step == 0) {
print_particles(step);
}
int numChunks = ceil((double)host_n / (double)threadsTotal);
for (int chunkNo = 0; chunkNo < numChunks; chunkNo++) {
/* Check collisions */
hipLaunchKernelGGL(( runCollisionChecks), dim3(num_blocks), dim3(num_threads), 0, 0, host_n, threadsTotal, chunkNo);
}
/* Barrier */
hipDeviceSynchronize();
thrust::sort(collisions, collisions + host_n * (host_n + 1));
// Collision Validation
hipMemset(resolved, 0, host_n);
hipMemset(validCollisions, 0, host_n * sizeof(Collision));
int next_idx = 0;
for (int i = 0; collisions[i].stepValue < 1.5; i++) {
Collision res = collisions[i];
if (resolved[res.i]) continue;
if (res.j < 0) {
validCollisions[next_idx] = res;
++next_idx;
resolved[res.i] = true;
}
else {
if (resolved[res.j]) continue;
validCollisions[next_idx] = res;
++next_idx;
resolved[res.i] = true;
resolved[res.j] = true;
}
}
int numValidCollisionChunks = ceil(next_idx / (double)threadsTotal);
for (int chunkNo = 0; chunkNo < numValidCollisionChunks; chunkNo++) {
hipLaunchKernelGGL(( resolveCollisions), dim3(num_blocks), dim3(num_threads), 0, 0, next_idx, threadsTotal, chunkNo);
}
for (int chunkNo = 0; chunkNo < numChunks; chunkNo++) {
hipLaunchKernelGGL(( moveUnresolvedParticles), dim3(num_blocks), dim3(num_threads), 0, 0, resolved, host_n, threadsTotal, chunkNo);
}
hipDeviceSynchronize();
}
hipProfilerStop();
print_statistics(host_s);
return 0;
}
| 5e794b1a3d8b2ee0752b64c3056f36d412d8605d.cu | #ifdef _WIN64
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#endif
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <algorithm>
#include "thrust/sort.h"
#include "thrust/device_ptr.h"
#include "thrust/host_vector.h"
#include <thrust/execution_policy.h>
#include "device_vector2.h"
#include "math.h"
#include <vector>
#include <cuda_profiler_api.h>
typedef enum {
MODE_PRINT,
MODE_PERF
} simulation_mode_t;
struct particle_t {
double i;
vector2 position;
vector2 velocity;
int p_collisions;
int w_collisions;
};
__constant__ int l, r, s;
__constant__ int n;
int host_n;
struct Collision {
__host__ __device__ Collision() {}
__device__ Collision(int i, int j, double stepValue) : i(i), j(j), stepValue(stepValue) {}
int i;
int j;
double stepValue;
};
__host__ __device__ bool operator<(const Collision& lhs, const Collision& rhs) {
return lhs.stepValue < rhs.stepValue;
}
__managed__ particle_t* particles;
__managed__ int* numCollisions;
__managed__ Collision* collisions;
__managed__ bool* resolved;
__managed__ Collision* validCollisions;
__device__ bool isStepValid(double step) {
return 0 <= step && step < 1;
}
// 2 is returned as "infinity"
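// Swept-circle test in b's frame of reference: a moves by the relative
// velocity (a.velocity - b.velocity) during the step. Project the vector to
// b onto that direction, find the closest approach, and solve for the travel
// distance at which the centre separation equals the sum of the radii. The
// returned value is the fraction of the time step at which contact happens.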
__device__ double detectParticleCollision_cuda(particle_t a, particle_t b) {
double distance = dist(b.position, a.position);
double sumRadii = r + r;
distance -= sumRadii;
vector2 resultVector = a.velocity - b.velocity;
double resultMag = magnitude(resultVector);
if (resultMag < distance) return 2;
vector2 unitResultVector = resultVector;
unitResultVector.normalize();
vector2 c = b.position - a.position;
double d = unitResultVector * c;
if (d <= 0) return 2;
double lengthC = magnitude(c);
double fSquared = lengthC * lengthC - d * d;
double sumRadiiSquared = sumRadii * sumRadii;
// Escape: closest that a will get to b.
if (fSquared >= sumRadiiSquared) return 2;
double tSquared = sumRadiiSquared - fSquared;
// negative tSquared. Probably don't have to do this check because the one preceding
// this one already ensures that tSquared isn't negative.
if (tSquared < 0) return 2;
double distanceToCollide = d - std::sqrt(tSquared);
// Ensure that distance A has to move to touch B
// is not greater than the magnitude of the movement vector
if (resultMag < distanceToCollide) return 2;
// the final displacement that the particle would have just before the collision.
// can also choose to return this in a result vector;
vector2 finalVector = unitResultVector * distanceToCollide;
return magnitude(finalVector) / resultMag;
}
__device__ void checkParticleCollisions(int i, const int max_collisions) {
// i: particle index
const particle_t& current = particles[i];
for (int j = 0; j < i + 1; j++) {
collisions[i * max_collisions + j] = Collision(i, j, 2.0);
}
for (int j = i + 1; j < max_collisions - 1; j++) {
const particle_t& target = particles[j];
double step = detectParticleCollision_cuda(current, target);
collisions[i * max_collisions + j] = Collision(i, j, step);
}
}
__device__ Collision detectWallCollision_cuda(const particle_t& p) {
vector2 end_pos = p.position + p.velocity;
Collision result(0, 0, 2.0); // stepValue > 1 means no collision
// TODO: reduce branching
if (end_pos.x - r <= 0) { // left, -1
Collision temp = Collision(p.i, -1, (r - p.position.x) / p.velocity.x);
if (temp < result) result = temp;
}
if (end_pos.x + r >= l) { // right, -2
Collision temp = Collision(p.i, -2, (l - r - p.position.x) / p.velocity.x);
if (temp < result) result = temp;
}
if (end_pos.y - r <= 0) { // bottom, -3
Collision temp = Collision(p.i, -3, (r - p.position.y) / p.velocity.y);
if (temp < result) result = temp;
}
if (end_pos.y + r >= l) { // top, -4
Collision temp = Collision(p.i, -4, (l - r - p.position.y) / p.velocity.y);
if (temp < result) result = temp;
}
return result;
}
__device__ void checkWallCollisions(int i, const int max_collisions) {
collisions[i * max_collisions + max_collisions-1] = detectWallCollision_cuda(particles[i]);
}
__host__ double fRand(double fMin, double fMax) {
return fMin + ((double)rand() / RAND_MAX) * (fMax - fMin);
}
__global__ void runCollisionChecks(int numParticles, int threadsTotal, int chunkNo) {
int i = chunkNo * threadsTotal + blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numParticles) return;
checkParticleCollisions(i, numParticles + 1);
checkWallCollisions(i, numParticles + 1);
}
__device__ double clamp(double d, double min, double max) {
const double t = d < min ? min : d;
return t > max ? max : t;
}
// keep a particle within bounds
__device__ void clampParticleBounds(particle_t& p) {
double x = p.position.x;
double y = p.position.y;
p.position.x = clamp(x, r, s - r);
p.position.y = clamp(y, r, s - r);
}
__device__ void resolveWallCollision(particle_t& p, int wall, double stepProportion) {
switch (wall) {
case -1:
p.position += p.velocity * stepProportion;
p.velocity.x *= -1;
break;
case -2:
p.position += p.velocity * stepProportion;
p.velocity.x *= -1;
break;
case -3:
p.position += p.velocity * stepProportion;
p.velocity.y *= -1;
break;
case -4:
p.position += p.velocity * stepProportion;
p.velocity.y *= -1;
}
p.position += p.velocity * (1 - stepProportion);
}
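// Equal-mass, perfectly elastic response: advance both particles to the
// moment of impact, exchange momentum along the collision normal n
// (impulse p = (v_a - v_b) . n for unit masses), then finish the remainder
// of the time step with the updated velocities.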
__device__ void resolveParticleCollision(particle_t& a, particle_t& b, double stepProportion) {
vector2 aImpact = a.position + a.velocity * stepProportion;
vector2 bImpact = b.position + b.velocity * stepProportion;
double d = dist(aImpact, bImpact);
vector2 n = vector2((bImpact.x - aImpact.x) / d, (bImpact.y - aImpact.y) / d);
double p = 2 * (a.velocity * n - b.velocity * n) / 2;
a.velocity = a.velocity - n * p * 1;
b.velocity = b.velocity + n * p * 1;
a.position = aImpact + a.velocity * (1.0f - stepProportion);
b.position = bImpact + b.velocity * (1.0f - stepProportion);
}
__global__ void resolveCollisions(int size, int threadsTotal, int chunkNo) {
int i = chunkNo * threadsTotal + blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) return;
Collision res = validCollisions[i];
if (res.j < 0) {
resolveWallCollision(particles[res.i], res.j, res.stepValue);
clampParticleBounds(particles[res.i]);
particles[res.i].w_collisions++;
} else {
resolveParticleCollision(particles[res.i], particles[res.j], res.stepValue);
clampParticleBounds(particles[res.i]);
clampParticleBounds(particles[res.j]);
particles[res.i].p_collisions++;
particles[res.j].p_collisions++;
}
}
__global__ void moveUnresolvedParticles(bool* resolved, int numParticles, int threadsTotal, int chunkNo) {
int i = chunkNo * threadsTotal + blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numParticles) return;
if (!resolved[i]) {
particles[i].position += particles[i].velocity;
}
}
__host__ void print_particles(int step) {
for (int i = 0; i < host_n; i++) {
printf("%d %d %10.8f %10.8f %10.8f %10.8f\n", step, i, particles[i].position.x, particles[i].position.y,
particles[i].velocity.x, particles[i].velocity.y);
}
}
__host__ void print_statistics(int num_step) {
for (int i = 0; i < host_n; i++) {
printf("%d %d %10.8f %10.8f %10.8f %10.8f %d %d\n", num_step, i, particles[i].position.x,
particles[i].position.y, particles[i].velocity.x, particles[i].velocity.y,
particles[i].p_collisions, particles[i].w_collisions);
}
}
int main(int argc, char** argv)
{
int i;
double x, y, vx, vy;
int num_blocks, num_threads;
int step;
int host_l, host_r, host_s;
simulation_mode_t mode;
char mode_buf[6];
if (argc != 3) {
printf("Usage:\n%s num_blocks numParticles\n", argv[0]);
return 1;
}
num_blocks = atoi(argv[1]);
num_threads = atoi(argv[2]);
scanf("%d", &host_n);
scanf("%d", &host_l);
scanf("%d", &host_r);
scanf("%d", &host_s);
scanf("%5s", mode_buf);
cudaMallocManaged(&numCollisions, sizeof(int) * host_n);
cudaMallocManaged(&particles, sizeof(particle_t) * host_n);
cudaMallocManaged(&collisions, sizeof(Collision) * host_n * (host_n+1)); // [particle][collided object (host_n=wall)]
cudaMallocManaged(&resolved, sizeof(bool) * host_n);
cudaMallocManaged(&validCollisions, sizeof(Collision) * host_n);
for (i = 0; i < host_n; i++) {
particles[i].i = -1;
particles[i].p_collisions = 0;
particles[i].w_collisions = 0;
}
while (scanf("%d %lf %lf %lf %lf", &i, &x, &y, &vx, &vy) != EOF) {
particles[i].i = i;
particles[i].position = vector2(x, y);
particles[i].velocity = vector2(vx, vy);
}
if (particles[0].i == -1) {
srand(time(NULL));
double minVelocity = 1.0 / 4;
double maxVelocity = 1.0 / (8.0 * host_r);
for (int i = 0; i < host_n; i++) {
int sign1 = (rand() % 2) ? 1 : -1;
int sign2 = (rand() % 2) ? 1 : -1;
particles[i].position = vector2(fRand(host_r, 1 - host_r), fRand(host_r, 1 - host_r));
particles[i].velocity = vector2(sign1 * fRand(minVelocity, maxVelocity), sign2 * fRand(minVelocity, maxVelocity));
}
}
mode = strcmp(mode_buf, "print") == 0 ? MODE_PRINT : MODE_PERF;
/* Copy to GPU constant memory */
cudaMemcpyToSymbol(n, &host_n, sizeof(n));
cudaMemcpyToSymbol(l, &host_l, sizeof(l));
cudaMemcpyToSymbol(r, &host_r, sizeof(r));
cudaMemcpyToSymbol(s, &host_s, sizeof(s));
int threadsTotal = num_blocks * num_threads;
cudaProfilerStart();
for (step = 0; step < host_s; step++) {
if (mode == MODE_PRINT || step == 0) {
print_particles(step);
}
int numChunks = ceil((double)host_n / (double)threadsTotal);
for (int chunkNo = 0; chunkNo < numChunks; chunkNo++) {
/* Check collisions */
runCollisionChecks<<<num_blocks, num_threads>>>(host_n, threadsTotal, chunkNo);
}
/* Barrier */
cudaDeviceSynchronize();
thrust::sort(collisions, collisions + host_n * (host_n + 1));
// Collision Validation
cudaMemset(resolved, 0, host_n);
cudaMemset(validCollisions, 0, host_n * sizeof(Collision));
int next_idx = 0;
for (int i = 0; collisions[i].stepValue < 1.5; i++) {
Collision res = collisions[i];
if (resolved[res.i]) continue;
if (res.j < 0) {
validCollisions[next_idx] = res;
++next_idx;
resolved[res.i] = true;
}
else {
if (resolved[res.j]) continue;
validCollisions[next_idx] = res;
++next_idx;
resolved[res.i] = true;
resolved[res.j] = true;
}
}
int numValidCollisionChunks = ceil(next_idx / (double)threadsTotal);
for (int chunkNo = 0; chunkNo < numValidCollisionChunks; chunkNo++) {
resolveCollisions<<<num_blocks, num_threads>>>(next_idx, threadsTotal, chunkNo);
}
for (int chunkNo = 0; chunkNo < numChunks; chunkNo++) {
moveUnresolvedParticles<<<num_blocks, num_threads>>>(resolved, host_n, threadsTotal, chunkNo);
}
cudaDeviceSynchronize();
}
cudaProfilerStop();
print_statistics(host_s);
return 0;
}
|
eb47e8fba4e0ea76829b13fa3797192cc3d6d019.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "trabalho.h"
User *generateUser(char *buffer, int nRatings)
{
int pos = 0, ratePos = 0, state = 0, size;
float rateAux = -1.0f;
User *newUser = (User*)malloc(sizeof(User));
newUser->name = "None";
newUser->rating = (float*) calloc(nRatings, sizeof(float));
char *tempBuffer = "";
size = strlen(buffer);
do {
if(state == 0 && buffer[pos] == ';')
{
tempBuffer = buffer;
strtok(tempBuffer, ";");
newUser->name = (char*)malloc(strlen(tempBuffer) * sizeof(char));
newUser->name = strcpy(newUser->name, tempBuffer);
state = 1;
}
else if(state == 1 && buffer[pos] != ';')
{
// ratings
switch(buffer[pos])
{
case '?':
newUser->rating[ratePos] = -1;
break;
default:
sscanf(&buffer[pos], "%f", &rateAux);
newUser->rating[ratePos] = rateAux;
rateAux = -1.0f;
break;
}
ratePos++;
}
pos++;
}while(pos < size);
return newUser;
}
int findAmmountOfRatesByUser(User *target, int nElements)
{
int total = 0;
for(int i=0; i<nElements; i++)
{
if(target->rating[i] != '?' && target->rating[i] != -1)
{
total++;
}
}
return total;
}
// GPU computation of the average of the ratings given by the user
__global__ void cudaCalcUserAverageRate(float *ratings, int *dev_nElements, float result)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < *dev_nElements)
{
result = 0.0f;
int count = 0;
for(int j=0; j < *dev_nElements; j++)
{
if(ratings[j] != '?' && ratings[j] != -1)
{
count++;
result += ratings[j];
}
}
result /= count;
}
}
float calcUserAverageRate(User *target, int nElements)
{
float average = 0.0f;
int count = 0;
for(int i=0; i<nElements; i++)
{
if(target->rating[i] != '?' && target->rating[i] != -1)
{
count++;
average+= target->rating[i];
}
}
average /= count;
return average;
}
__global__ void cudaCalPearsonCorrelation(float *ratingsA, float *ratingsB, int *dev_nElements, float pearson)
{
int i = threadIdx.x;
if(i < *dev_nElements)
{
int nRates = 0;
float pearsonNum = 0.0f, pearsonDen = 0.0f; // pearson = 0.0f
float tempA = 0.0f, tempB = 0.0f, tempC = 0.0f, tempD = 0.0f;
if(ratingsA[i] == -1 || ratingsB[i] == -1)
{
// DO NOTHING, someone hasn't evaluated an item
}
else // if both users have rated this item
{
nRates++;
tempA += ratingsA[i]; //stores the Sum of the rates from A
tempB += ratingsB[i]; //stores the Sum of the rates from B
pearsonNum += ratingsA[i] * ratingsB[i];
tempC += powf(ratingsA[i], 2); // stores the Sum of the rate² from A
tempD += powf(ratingsB[i], 2); // stores the Sum of the rate² from B
}
pearsonNum -= (tempA * tempB) / nRates; // sumXY - ((sumX)(sumY)/n)
tempC -= powf(tempA, 2) / nRates; // sumX² - ((sumX)²/n)
tempD -= powf(tempB, 2) / nRates; // sumY² - ((sumY)²/n)
pearsonDen = sqrtf(tempC * tempD);
pearson = pearsonNum / pearsonDen;
}
}
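// Pearson correlation over the items rated by both users, in the single-pass
// computational form:
// r = (Sxy - Sx*Sy/n) / sqrt((Sxx - Sx*Sx/n) * (Syy - Sy*Sy/n))
// where Sx, Sy are sums of ratings, Sxx, Syy sums of squares and Sxy the sum
// of products, all restricted to co-rated items.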
float calcPearsonCorrelation(User *a, User *b, int nElements)
{
float pearson = 0.0f, pearsonNum = 0.0f, pearsonDen = 0.0f;
float tempA = 0.0f, tempB = 0.0f, tempC = 0.0f, tempD = 0.0f;
// int ratedItens[MAXITENS]; // Store the itens that are rated by both users | 1 = rated by both, 0 otherwise
int nRates = 0;
// loop that verify the itens that has been rated by each user and set the itens that can be used to calculate the similarity
for(int i=0; i<nElements; i++)
{
if(a->rating[i] == -1 || b->rating[i] == -1)
{
continue; // someone hasn't evaluated an item
}
else // if both users have rated this item
{
// ratedItens[i] = 1;
nRates++;
tempA += a->rating[i]; //stores the Sum of the rates from A
tempB += b->rating[i]; //stores the Sum of the rates from B
pearsonNum += a->rating[i] * b->rating[i];
tempC += pow(a->rating[i], 2); // stores the Sum of the rate² from A
tempD += pow(b->rating[i], 2); // stores the Sum of the rate² from B
}
}
pearsonNum -= (tempA * tempB) / nRates; // sumXY - ((sumX)(sumY)/n)
tempC -= pow(tempA, 2) / nRates; // sumX² - ((sumX)²/n)
tempD -= pow(tempB, 2) / nRates; // sumY² - ((sumY)²/n)
pearsonDen = sqrt(tempC * tempD);
pearson = pearsonNum / pearsonDen;
//printf("Pearson entre %s e %s = %.2f\n", a->name, b-> name, pearson);
return pearson;
}
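// User-based collaborative filtering prediction: the target user's own
// average rating plus a similarity-weighted average of the deviations of the
// neighbours' ratings for the item; only users who rated the item and whose
// Pearson correlation is at least 0.7 contribute to the weighted sum.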
float predictRateByUser(User **array, int a_size, User *desired, int itemID, int nElements)
{
float pred = -0.5f, demRes = 0, numRes = 0, pearson = 0, des_average = 0, cur_average = 0, temp_average = 0;
float *dev_ratings;
hipMalloc(&dev_ratings, nElements*sizeof(float));
hipMemcpy(dev_ratings, desired->rating, nElements*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cudaCalcUserAverageRate), dim3(32), dim3(1), 0, 0, desired->rating, &nElements, des_average);
hipFree(dev_ratings);
for(int i=0; i<a_size; i++)
{
if(desired == array[i])
{
continue; // represents itself
}
else if(array[i]->rating[itemID] == -1)
{
continue; // marks to skip in case it hasn't evaluate
}
else
{
hipLaunchKernelGGL(( cudaCalPearsonCorrelation), dim3(32), dim3(1), 0, 0, desired->rating, array[i]->rating, &nElements, pearson);
if(pearson >= 0.7)
{
demRes += pearson;
hipMalloc(&dev_ratings, nElements*sizeof(float));
hipMemcpy(dev_ratings, desired->rating, nElements*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cudaCalcUserAverageRate), dim3(32), dim3(1), 0, 0, desired->rating, &nElements, temp_average);
cur_average = array[i]->rating[itemID] - temp_average;
hipFree(dev_ratings);
numRes += (pearson * cur_average);
}
}
}
pred = des_average + (numRes / demRes);
return pred;
}
int main(int argc, char *argv[])
{
int targetItemId = -1, nElements = 0, timesReaded = 0, maxLines = 0;
char *targetUserName, readBuffer[1024], charBuffer, *filename;
FILE *fp;
User *targetUser = NULL;
if(argc != 4)
{
fprintf(stderr,"Formato : %s Arquivo (string) Usurio (string) Item# (int)\n",argv[0]);
return 1;
}
filename = argv[1];
targetUserName = argv[2];
sscanf(argv[3], "%d", &targetItemId);
targetItemId--;
fp = fopen(filename, "r");
if(fp == NULL)
{
printf("File couldn't be loaded!\n");
return 0;
}
while(feof(fp) == 0)
{
charBuffer = fgetc(fp);
if(charBuffer == '\n')
{
maxLines++; // ammount of rows on the file
}
}
rewind(fp); // reset buffer position
maxLines--; // removes the header line
fscanf(fp, "%s", readBuffer); // header line
int size = strlen(readBuffer);
for(int i=0; i<size; i++)
{
if(readBuffer[i] == ';')
{
nElements++; // stores the ammount of columns in the file
}
}
if(targetItemId > nElements)
{
fprintf(stderr,"Escopo incorreto!\nNo existe a coluna %d, este arquivo possui apenas %d colunas!\n\n", targetItemId, nElements );
return EXIT_FAILURE;
}
User *usersArray[maxLines];
while(feof(fp) == 0 && timesReaded < maxLines)
{
fscanf(fp, "%s", readBuffer);
if(readBuffer[0] != '\0')
{
usersArray[timesReaded] = generateUser(readBuffer, nElements);
timesReaded++;
readBuffer[0] = '\0';
}
}
fclose(fp);
timesReaded = 0; // reset the value
// finding the user
for(int i=0; i<maxLines; i++)
{
if(strcmp(usersArray[i]->name, targetUserName) == 0)
{
targetUser = usersArray[i];
}
// count how many times this item was rated
if(usersArray[i]->rating[targetItemId] != -1)
{
timesReaded++;
}
}
if(targetUser == NULL)
{
printf("Usurio no encontrado!\n");
return EXIT_FAILURE;
}
else
{
printf("----------------------| Usurio %s |----------------------\n", targetUser->name);
printf("Avaliou %d Itens.\n", findAmmountOfRatesByUser(targetUser, nElements));
// the user rated the item
if(targetUser->rating[targetItemId] != -1)
{
printf("Avaliou o Item solicitado: %.2f.\n", targetUser->rating[targetItemId]);
}
// the user did not rate the item
else
{
float pred1 = -1.0f, pred2 = -1.0f;
pred1 = predictRateByUser(usersArray, maxLines, targetUser, targetItemId, nElements);
printf("No avaliou o Item Solicitado!!!\n\t|-Previso por Usurio: %.2f\n\t|-Previso por Item: %.2f\n", pred1, pred2);
}
printf("O Item Pesquisado foi avaliado %d vezes\n", timesReaded);
printf("--------------------------------------------------------\n");
return EXIT_SUCCESS;
}
}
| eb47e8fba4e0ea76829b13fa3797192cc3d6d019.cu | #include "trabalho.h"
User *generateUser(char *buffer, int nRatings)
{
int pos = 0, ratePos = 0, state = 0, size;
float rateAux = -1.0f;
User *newUser = (User*)malloc(sizeof(User));
newUser->name = "None";
newUser->rating = (float*) calloc(nRatings, sizeof(float));
char *tempBuffer = "";
size = strlen(buffer);
do {
if(state == 0 && buffer[pos] == ';')
{
tempBuffer = buffer;
strtok(tempBuffer, ";");
newUser->name = (char*)malloc(strlen(tempBuffer) * sizeof(char));
newUser->name = strcpy(newUser->name, tempBuffer);
state = 1;
}
else if(state == 1 && buffer[pos] != ';')
{
// ratings
switch(buffer[pos])
{
case '?':
newUser->rating[ratePos] = -1;
break;
default:
sscanf(&buffer[pos], "%f", &rateAux);
newUser->rating[ratePos] = rateAux;
rateAux = -1.0f;
break;
}
ratePos++;
}
pos++;
}while(pos < size);
return newUser;
}
int findAmmountOfRatesByUser(User *target, int nElements)
{
int total = 0;
for(int i=0; i<nElements; i++)
{
if(target->rating[i] != '?' && target->rating[i] != -1)
{
total++;
}
}
return total;
}
// GPU computation of the average of the ratings given by the user
__global__ void cudaCalcUserAverageRate(float *ratings, int *dev_nElements, float result)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < *dev_nElements)
{
result = 0.0f;
int count = 0;
for(int j=0; j < *dev_nElements; j++)
{
if(ratings[j] != '?' && ratings[j] != -1)
{
count++;
result += ratings[j];
}
}
result /= count;
}
}
float calcUserAverageRate(User *target, int nElements)
{
float average = 0.0f;
int count = 0;
for(int i=0; i<nElements; i++)
{
if(target->rating[i] != '?' && target->rating[i] != -1)
{
count++;
average+= target->rating[i];
}
}
average /= count;
return average;
}
__global__ void cudaCalPearsonCorrelation(float *ratingsA, float *ratingsB, int *dev_nElements, float pearson)
{
int i = threadIdx.x;
if(i < *dev_nElements)
{
int nRates = 0;
float pearsonNum = 0.0f, pearsonDen = 0.0f; // pearson = 0.0f
float tempA = 0.0f, tempB = 0.0f, tempC = 0.0f, tempD = 0.0f;
if(ratingsA[i] == -1 || ratingsB[i] == -1)
{
// DO NOTHING, someone hasn't evaluated an item
}
else // if both users have rated this item
{
nRates++;
tempA += ratingsA[i]; //stores the Sum of the rates from A
tempB += ratingsB[i]; //stores the Sum of the rates from B
pearsonNum += ratingsA[i] * ratingsB[i];
tempC += powf(ratingsA[i], 2); // stores the Sum of the rate² from A
tempD += powf(ratingsB[i], 2); // stores the Sum of the rate² from B
}
pearsonNum -= (tempA * tempB) / nRates; // sumXY - ((sumX)(sumY)/n)
tempC -= powf(tempA, 2) / nRates; // sumX² - ((sumX)²/n)
tempD -= powf(tempB, 2) / nRates; // sumY² - ((sumY)²/n)
pearsonDen = sqrtf(tempC * tempD);
pearson = pearsonNum / pearsonDen;
}
}
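// Pearson correlation over the items rated by both users, in the single-pass
// computational form:
// r = (Sxy - Sx*Sy/n) / sqrt((Sxx - Sx*Sx/n) * (Syy - Sy*Sy/n))
// where Sx, Sy are sums of ratings, Sxx, Syy sums of squares and Sxy the sum
// of products, all restricted to co-rated items.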
float calcPearsonCorrelation(User *a, User *b, int nElements)
{
float pearson = 0.0f, pearsonNum = 0.0f, pearsonDen = 0.0f;
float tempA = 0.0f, tempB = 0.0f, tempC = 0.0f, tempD = 0.0f;
// int ratedItens[MAXITENS]; // Store the itens that are rated by both users | 1 = rated by both, 0 otherwise
int nRates = 0;
// loop that verify the itens that has been rated by each user and set the itens that can be used to calculate the similarity
for(int i=0; i<nElements; i++)
{
if(a->rating[i] == -1 || b->rating[i] == -1)
{
continue; // someone hasn't evaluated an item
}
else // if both users have rated this item
{
// ratedItens[i] = 1;
nRates++;
tempA += a->rating[i]; //stores the Sum of the rates from A
tempB += b->rating[i]; //stores the Sum of the rates from B
pearsonNum += a->rating[i] * b->rating[i];
tempC += pow(a->rating[i], 2); // stores the Sum of the rate² from A
tempD += pow(b->rating[i], 2); // stores the Sum of the rate² from B
}
}
pearsonNum -= (tempA * tempB) / nRates; // sumXY - ((sumX)(sumY)/n)
tempC -= pow(tempA, 2) / nRates; // sumX² - ((sumX)²/n)
tempD -= pow(tempB, 2) / nRates; // sumY² - ((sumY)²/n)
pearsonDen = sqrt(tempC * tempD);
pearson = pearsonNum / pearsonDen;
//printf("Pearson entre %s e %s = %.2f\n", a->name, b-> name, pearson);
return pearson;
}
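// User-based collaborative filtering prediction: the target user's own
// average rating plus a similarity-weighted average of the deviations of the
// neighbours' ratings for the item; only users who rated the item and whose
// Pearson correlation is at least 0.7 contribute to the weighted sum.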
float predictRateByUser(User **array, int a_size, User *desired, int itemID, int nElements)
{
float pred = -0.5f, demRes = 0, numRes = 0, pearson = 0, des_average = 0, cur_average = 0, temp_average = 0;
float *dev_ratings;
cudaMalloc(&dev_ratings, nElements*sizeof(float));
cudaMemcpy(dev_ratings, desired->rating, nElements*sizeof(float), cudaMemcpyHostToDevice);
cudaCalcUserAverageRate<<<32, 1>>>(desired->rating, &nElements, des_average);
cudaFree(dev_ratings);
for(int i=0; i<a_size; i++)
{
if(desired == array[i])
{
continue; // represents itself
}
else if(array[i]->rating[itemID] == -1)
{
continue; // marks to skip in case it hasn't evaluate
}
else
{
cudaCalPearsonCorrelation<<<32, 1>>>(desired->rating, array[i]->rating, &nElements, pearson);
if(pearson >= 0.7)
{
demRes += pearson;
cudaMalloc(&dev_ratings, nElements*sizeof(float));
cudaMemcpy(dev_ratings, desired->rating, nElements*sizeof(float), cudaMemcpyHostToDevice);
cudaCalcUserAverageRate<<<32, 1>>>(desired->rating, &nElements, temp_average);
cur_average = array[i]->rating[itemID] - temp_average;
cudaFree(dev_ratings);
numRes += (pearson * cur_average);
}
}
}
pred = des_average + (numRes / demRes);
return pred;
}
int main(int argc, char *argv[])
{
int targetItemId = -1, nElements = 0, timesReaded = 0, maxLines = 0;
char *targetUserName, readBuffer[1024], charBuffer, *filename;
FILE *fp;
User *targetUser = NULL;
if(argc != 4)
{
fprintf(stderr,"Formato : %s Arquivo (string) Usuário (string) Item# (int)\n",argv[0]);
return 1;
}
filename = argv[1];
targetUserName = argv[2];
sscanf(argv[3], "%d", &targetItemId);
targetItemId--;
fp = fopen(filename, "r");
if(fp == NULL)
{
printf("File couldn't be loaded!\n");
return 0;
}
while(feof(fp) == 0)
{
charBuffer = fgetc(fp);
if(charBuffer == '\n')
{
maxLines++; // ammount of rows on the file
}
}
rewind(fp); // reset buffer position
maxLines--; // removes the header line
fscanf(fp, "%s", readBuffer); // header line
int size = strlen(readBuffer);
for(int i=0; i<size; i++)
{
if(readBuffer[i] == ';')
{
nElements++; // stores the ammount of columns in the file
}
}
if(targetItemId > nElements)
{
fprintf(stderr,"Escopo incorreto!\nNão existe a coluna %d, este arquivo possui apenas %d colunas!\n\n", targetItemId, nElements );
return EXIT_FAILURE;
}
User *usersArray[maxLines];
while(feof(fp) == 0 && timesReaded < maxLines)
{
fscanf(fp, "%s", readBuffer);
if(readBuffer[0] != '\0')
{
usersArray[timesReaded] = generateUser(readBuffer, nElements);
timesReaded++;
readBuffer[0] = '\0';
}
}
fclose(fp);
timesReaded = 0; // reset the value
// finding the user
for(int i=0; i<maxLines; i++)
{
if(strcmp(usersArray[i]->name, targetUserName) == 0)
{
targetUser = usersArray[i];
}
// count how many times this item was rated
if(usersArray[i]->rating[targetItemId] != -1)
{
timesReaded++;
}
}
if(targetUser == NULL)
{
printf("Usuário não encontrado!\n");
return EXIT_FAILURE;
}
else
{
printf("----------------------| Usuário %s |----------------------\n", targetUser->name);
printf("Avaliou %d Itens.\n", findAmmountOfRatesByUser(targetUser, nElements));
// the user rated the item
if(targetUser->rating[targetItemId] != -1)
{
printf("Avaliou o Item solicitado: %.2f.\n", targetUser->rating[targetItemId]);
}
// the user did not rate the item
else
{
float pred1 = -1.0f, pred2 = -1.0f;
pred1 = predictRateByUser(usersArray, maxLines, targetUser, targetItemId, nElements);
printf("Não avaliou o Item Solicitado!!!\n\t|-Previsão por Usuário: %.2f\n\t|-Previsão por Item: %.2f\n", pred1, pred2);
}
printf("O Item Pesquisado foi avaliado %d vezes\n", timesReaded);
printf("--------------------------------------------------------\n");
return EXIT_SUCCESS;
}
}
|
4d6d07848f5ecd7148f9b2a2df97a4384f186894.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "cuda_prac.h"
__global__ void mathKernel1(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
if(tid % 2 == 0)
ia = 100.0f;
else
ib = 200.0f;
c[tid] = ia + ib;
}
__global__ void mathKernel2(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
if((tid / warpSize) % 2 == 0)
ia = 100.0f;
else
ib = 200.0f;
c[tid] = ia + ib;
}
__global__ void mathKernel3(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
bool ipred = (tid % 2 == 0);
if(ipred)
ia = 100.0f;
if(!ipred)
ib = 200.0f;
c[tid] = ia + ib;
}
__global__ void mathKernel4(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
int itid = tid >> 5;
if((itid & 0x01) == 0)
ia = 100.0f;
else
ib = 200.0f;
c[tid] = ia + ib;
}
__global__ void warmingup(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
if((tid / warpSize) %2 == 0)
ia = 100.0f;
else
ib = 200.0f;
c[tid] = ia + ib;
}
int main(int argc, char** argv){
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s using Device %d: %s\n", argv[0], dev, deviceProp.name);
int size = 512;
int blocksize = 512;
if(argc > 1)
blocksize = atoi(argv[1]);
if(argc > 2)
size = atoi(argv[2]);
printf("Data size %d ", size);
dim3 block(blocksize, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("Execution configure (block %d grid %d)\n", block.x, grid.x);
float *d_C;
size_t nBytes = size * sizeof(float);
CHECK(hipMalloc((float **) &d_C, nBytes));
double iStart, iElaps;
CHECK(hipDeviceSynchronize());
iStart = cpuSecond();
hipLaunchKernelGGL(( warmingup) , dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("warmup <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(hipGetLastError());
iStart = cpuSecond();
hipLaunchKernelGGL(( mathKernel1) , dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("mathKernel1 <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(hipGetLastError());
iStart = cpuSecond();
hipLaunchKernelGGL(( mathKernel2) , dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = cpuSecond() -iStart;
printf("mathKernel2 <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(hipGetLastError());
iStart = cpuSecond();
hipLaunchKernelGGL(( mathKernel3) , dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("mathKernel3 <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(hipGetLastError());
iStart = cpuSecond();
hipLaunchKernelGGL(( mathKernel4) , dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("mathKernel4 <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(hipGetLastError());
CHECK(hipFree(d_C));
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| 4d6d07848f5ecd7148f9b2a2df97a4384f186894.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include "cuda_prac.h"
__global__ void mathKernel1(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
if(tid % 2 == 0)
ia = 100.0f;
else
ib = 200.0f;
c[tid] = ia + ib;
}
__global__ void mathKernel2(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
if((tid / warpSize) % 2 == 0)
ia = 100.0f;
else
ib = 200.0f;
c[tid] = ia + ib;
}
__global__ void mathKernel3(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
bool ipred = (tid % 2 == 0);
if(ipred)
ia = 100.0f;
if(!ipred)
ib = 200.0f;
c[tid] = ia + ib;
}
__global__ void mathKernel4(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
int itid = tid >> 5;
if((itid & 0x01) == 0)
ia = 100.0f;
else
ib = 200.0f;
c[tid] = ia + ib;
}
__global__ void warmingup(float *c){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = 0.0f;
ib = 0.0f;
if((tid / warpSize) %2 == 0)
ia = 100.0f;
else
ib = 200.0f;
c[tid] = ia + ib;
}
int main(int argc, char** argv){
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s using Device %d: %s\n", argv[0], dev, deviceProp.name);
int size = 512;
int blocksize = 512;
if(argc > 1)
blocksize = atoi(argv[1]);
if(argc > 2)
size = atoi(argv[2]);
printf("Data size %d ", size);
dim3 block(blocksize, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("Execution configure (block %d grid %d)\n", block.x, grid.x);
float *d_C;
size_t nBytes = size * sizeof(float);
CHECK(cudaMalloc((float **) &d_C, nBytes));
double iStart, iElaps;
CHECK(cudaDeviceSynchronize());
iStart = cpuSecond();
warmingup <<<grid, block>>> (d_C);
CHECK(cudaDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("warmup <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(cudaGetLastError());
iStart = cpuSecond();
mathKernel1 <<<grid, block>>> (d_C);
CHECK(cudaDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("mathKernel1 <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(cudaGetLastError());
iStart = cpuSecond();
mathKernel2 <<<grid, block>>> (d_C);
CHECK(cudaDeviceSynchronize());
iElaps = cpuSecond() -iStart;
printf("mathKernel2 <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(cudaGetLastError());
iStart = cpuSecond();
mathKernel3 <<<grid, block>>> (d_C);
CHECK(cudaDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("mathKernel3 <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(cudaGetLastError());
iStart = cpuSecond();
mathKernel4 <<<grid, block>>> (d_C);
CHECK(cudaDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("mathKernel4 <<<%4d %4d>>> elapsed %f sec \n", grid.x, block.x, iElaps);
CHECK(cudaGetLastError());
CHECK(cudaFree(d_C));
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
73b44f1afae75e46ed7803401b4d6648707b422a.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) { return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
std::vector<hipStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<hipStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(hipStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(hipSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
page = sample.page;
gpair = sample.gpair;
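// With deterministic histograms enabled, a rounding factor derived from the gradients lets them be truncated to a fixed precision before the atomic adds in histogram construction, so the accumulated sums do not depend on addition order.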
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = 0;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx),
node_value_constraints[nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSingleSplit(dh::ToSpan(splits_out), inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(hipMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
hipMemcpyDeviceToHost));
return result.front();
}
std::vector<DeviceSplitCandidate> EvaluateLeftRightSplits(
ExpandEntry candidate, int left_nidx, int right_nidx,
const RegTree& tree) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
left_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx),
node_value_constraints[left_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx),
node_value_constraints[right_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplits(dh::ToSpan(splits_out), left, right);
std::vector<DeviceSplitCandidate> result(2);
dh::safe_cuda(hipMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
hipMemcpyDeviceToHost));
return result;
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
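// Subtraction trick: a child's histogram equals its parent's minus its sibling's, so the second child can be derived without another pass over the training rows.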
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(hipMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
hipMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
hipMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(hipMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), hipMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
node_value_constraints.resize(tree.GetNodes().size());
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(
param, parent_sum);
auto left_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.left_sum) *
param.learning_rate;
auto right_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.right_sum) *
param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), candidate.split.left_sum,
candidate.split.right_sum,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
void InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::hip::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split, 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
this->InitRoot(p_tree, reducer);
monitor.Stop("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
auto splits = this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx,
*p_tree);
monitor.Stop("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.Stop("FinalisePosition");
}
};
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.Start("InitData");
this->InitData(p_fmat);
monitor_.Stop("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed in args can be empty, if we simply purge the old maker without
// preserving parameters then we can't do Update on it.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
| 73b44f1afae75e46ed7803401b4d6648707b422a.cu | /*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) { return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
std::vector<cudaStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<cudaStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = 0;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx),
node_value_constraints[nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSingleSplit(dh::ToSpan(splits_out), inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(cudaMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
cudaMemcpyDeviceToHost));
return result.front();
}
std::vector<DeviceSplitCandidate> EvaluateLeftRightSplits(
ExpandEntry candidate, int left_nidx, int right_nidx,
const RegTree& tree) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
left_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx),
node_value_constraints[left_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx),
node_value_constraints[right_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplits(dh::ToSpan(splits_out), left, right);
std::vector<DeviceSplitCandidate> result(2);
dh::safe_cuda(cudaMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
cudaMemcpyDeviceToHost));
return result;
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(cudaMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
cudaMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
cudaMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(cudaMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), cudaMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
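// The node histogram is reduced across workers as a flat array of scalars, two per bin (gradient and hessian), hence the sizeof ratio in the length below.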
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
node_value_constraints.resize(tree.GetNodes().size());
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(
param, parent_sum);
auto left_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.left_sum) *
param.learning_rate;
auto right_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.right_sum) *
param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), candidate.split.left_sum,
candidate.split.right_sum,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
void InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::cuda::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
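// After the all-reduce, root_sum holds the gradient/hessian sums aggregated across all workers.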
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split, 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
this->InitRoot(p_tree, reducer);
monitor.Stop("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
auto splits = this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx,
*p_tree);
monitor.Stop("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.Stop("FinalisePosition");
}
};
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.Start("InitData");
this->InitData(p_fmat);
monitor_.Stop("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed in args can be empty, if we simply purge the old maker without
// preserving parameters then we can't do Update on it.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
6d688d756aa447ba76dc067d4b979b039cf6d8c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <matrixMul_naive.cuh>
#include <matrixMul_tiling.cuh>
#include <matrixMul_coalescing.cuh>
#include <matrixMul_compOpt.cuh>
#include <matrixMul_unroll.cuh>
#include <matrixMul_prefetch.cuh>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
inline int stringRemoveDelimiter(char delimiter, const char *string)
{
int string_start = 0;
while (string[string_start] == delimiter)
{
string_start++;
}
if (string_start >= (int)strlen(string)-1)
{
return 0;
}
return string_start;
}
inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref)
{
bool bFound = false;
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = (int)(equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = (int)strlen(string_ref);
if (length == argv_length && !STRNCASECMP(string_argv, string_ref, length))
{
bFound = true;
continue;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref)
{
bool bFound = false;
int value = -1;
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = (int)strlen(string_ref);
if (!STRNCASECMP(string_argv, string_ref, length))
{
if (length+1 <= (int)strlen(string_argv))
{
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
}
else
{
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound)
{
return value;
}
else
{
return 0;
}
}
void TransposeMatrix(float *data, float *dataT, int size, int wB, int hB){
for (int i = 0; i < wB; i++){
for (int j = 0; j < hB; j++){
dataT[hB*i + j] = data[wB*j + i];
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
/****************************************************/
/* Preparations */
/****************************************************/
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
hipSetDevice(devID);
}
int WA = (32 * BLOCK_SIZE); // Matrix A width
int HA = (16 * BLOCK_SIZE); // Matrix A height
int WB = (24 * BLOCK_SIZE); // Matrix B width
int HB = WA; // Matrix B height
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
WA = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
HA = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
WB = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
HB = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
int WC = WB; // Matrix C width
int HC = HA; // Matrix C height
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// utilities
hipEvent_t start;
hipEvent_t stop;
float msecTotal;
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
float flop = 2 * (float)WC * (float)HC * (float)WA;
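    // each element of C takes WA multiply-add pairs, so the total work is
    // 2 * WC * HC * WA floating point operations (used for the GFLOPS figures below)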
float* h_Bt = (float*) malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
TransposeMatrix(h_B, h_Bt, size_B, WB, HB);
// allocate device memory
float* d_A;
hipMalloc((void**) &d_A, mem_size_A);
float* d_B;
hipMalloc((void**) &d_B, mem_size_B);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
hipMalloc((void**) &d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
#if CHECK_RESULT == 1
printf("Begining CPU")
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// compute reference solution
float* reference = (float*) malloc(mem_size_C);
computeGold(reference, h_A, h_B, HA, WA, WB);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Naive CPU (Golden Reference)\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#endif
dim3 threads,grid;
/****************************************************/
/* naive implementation on GPU */
/****************************************************/
#if ENABLE_NAIVE == 1
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
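    // note: the integer division assumes WC and HC are multiples of BLOCK_SIZE;
    // with other sizes the trailing rows/columns of C would simply not be computed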
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// naive implementation
hipLaunchKernelGGL(( matrixMul_naive), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Naive GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
#endif
/****************************************************/
/* Tiling without global mem coalescing */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
    // tiling implementation
hipLaunchKernelGGL(( matrixMul_tiling), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Tiling GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Global mem coalescing with smem bank conflict */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
    // global memory coalescing implementation
hipLaunchKernelGGL(( matrixMul_coalescing), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Global mem coalescing GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
    /*  Threads perform computation optimization      */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
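    // each block now covers a (4*BLOCK_SIZE) x BLOCK_SIZE tile of C with only
    // BLOCK_SIZE x 4 threads, so every thread accumulates several C elements
    // (the per-thread loop lives in matrixMul_compOpt.cuh)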
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
    // computation-optimized implementation
hipLaunchKernelGGL(( matrixMul_compOpt), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Threads perform computation optimization GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Loop Unrolling */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
    // loop-unrolled implementation
hipLaunchKernelGGL(( matrixMul_unroll), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Loop unrolling GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Prefetching */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
    // prefetching implementation
hipLaunchKernelGGL(( matrixMul_prefetch), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Prefetching GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Cleaning */
/****************************************************/
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(h_Bt);
#if CHECK_RESULT == 1
free(reference);
#endif
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipDeviceReset();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (fabs(data1[k] - data2[k]) > 0.1 ) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f \n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf("Total Errors = %d \n", error_count);
}
| 6d688d756aa447ba76dc067d4b979b039cf6d8c2.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <matrixMul_naive.cuh>
#include <matrixMul_tiling.cuh>
#include <matrixMul_coalescing.cuh>
#include <matrixMul_compOpt.cuh>
#include <matrixMul_unroll.cuh>
#include <matrixMul_prefetch.cuh>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
inline int stringRemoveDelimiter(char delimiter, const char *string)
{
int string_start = 0;
while (string[string_start] == delimiter)
{
string_start++;
}
if (string_start >= (int)strlen(string)-1)
{
return 0;
}
return string_start;
}
inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref)
{
bool bFound = false;
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = (int)(equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = (int)strlen(string_ref);
if (length == argv_length && !STRNCASECMP(string_argv, string_ref, length))
{
bFound = true;
continue;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref)
{
bool bFound = false;
int value = -1;
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = (int)strlen(string_ref);
if (!STRNCASECMP(string_argv, string_ref, length))
{
if (length+1 <= (int)strlen(string_argv))
{
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
}
else
{
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound)
{
return value;
}
else
{
return 0;
}
}
void TransposeMatrix(float *data, float *dataT, int size, int wB, int hB){
for (int i = 0; i < wB; i++){
for (int j = 0; j < hB; j++){
dataT[hB*i + j] = data[wB*j + i];
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
/****************************************************/
/* Preparations */
/****************************************************/
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
int WA = (32 * BLOCK_SIZE); // Matrix A width
int HA = (16 * BLOCK_SIZE); // Matrix A height
int WB = (24 * BLOCK_SIZE); // Matrix B width
int HB = WA; // Matrix B height
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
WA = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
HA = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
WB = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
HB = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
int WC = WB; // Matrix C width
int HC = HA; // Matrix C height
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// utilities
cudaEvent_t start;
cudaEvent_t stop;
float msecTotal;
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
float flop = 2 * (float)WC * (float)HC * (float)WA;
float* h_Bt = (float*) malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
TransposeMatrix(h_B, h_Bt, size_B, WB, HB);
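    // note: h_Bt (B transposed) is prepared here but none of the kernels below use it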
// allocate device memory
float* d_A;
cudaMalloc((void**) &d_A, mem_size_A);
float* d_B;
cudaMalloc((void**) &d_B, mem_size_B);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
cudaMalloc((void**) &d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
#if CHECK_RESULT == 1
printf("Begining CPU")
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// compute reference solution
float* reference = (float*) malloc(mem_size_C);
computeGold(reference, h_A, h_B, HA, WA, WB);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Naive CPU (Golden Reference)\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#endif
dim3 threads,grid;
/****************************************************/
/* naive implementation on GPU */
/****************************************************/
#if ENABLE_NAIVE == 1
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// naive implementation
matrixMul_naive<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Naive GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
#endif
/****************************************************/
/* Tiling without global mem coalescing */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
    // tiling implementation
matrixMul_tiling<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Tiling GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Global mem coalescing with smem bank conflict */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
    // global memory coalescing implementation
matrixMul_coalescing<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Global mem coalescing GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
    /*  Threads perform computation optimization      */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
    // computation-optimized implementation
matrixMul_compOpt<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Threads perform computation optimization GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Loop Unrolling */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
    // loop-unrolled implementation
matrixMul_unroll<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Loop unrolling GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Prefetching */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
    // prefetching implementation
matrixMul_prefetch<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Prefetching GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
printf("-------------------------------------\n");
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Cleaning */
/****************************************************/
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(h_Bt);
#if CHECK_RESULT == 1
free(reference);
#endif
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaThreadExit();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (fabs(data1[k] - data2[k]) > 0.1 ) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f \n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf("Total Errors = %d \n", error_count);
}
|
aff2ed79a77ada24099b1883cbf2e954ba2fbd07.hip | // !!! This is a file automatically generated by hipify!!!
//
// UnaryExecution.cpp
// MNN
//
// Created by MNN on 2019/02/28.
//  Copyright © 2018, Alibaba Group Holding Limited
//
#include "UnaryExecution.hpp"
#include "core/Macro.h"
#include "core/TensorUtils.hpp"
#include "backend/cuda/core/CUDABackend.hpp"
#include <hip/hip_runtime.h>
namespace MNN {
namespace CUDA {
template <typename T>
__global__ void ABS(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = abs(input[i]);
}
return;
}
template <typename T>
__global__ void EXP(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = exp(input[i]);
}
return;
}
template <typename T>
__global__ void NEG(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = -input[i];
}
return;
}
template <typename T>
__global__ void RECIPROCAL(T *input, T *output, size_t count) {
T one = 1.0;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = one / input[i];
}
return;
}
template <typename T>
__global__ void FLOOR(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = floor(input[i]);
}
}
template <typename T>
__global__ void CEIL(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = ceil(input[i]);
}
}
template <typename T>
__global__ void SQUARE(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = input[i] * input[i];
}
return;
}
template <typename T>
__global__ void SQRT(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = sqrt(input[i]);
}
return;
}
template <typename T>
__global__ void RSQRT(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = rsqrt(input[i]);
}
return;
}
template <typename T>
__global__ void LOG(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = log(input[i]);
}
return;
}
template <typename T>
__global__ void SIN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = sin(input[i]);
}
return;
}
template <typename T>
__global__ void COS(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = cos(input[i]);
}
return;
}
template <typename T>
__global__ void TAN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = tan(input[i]);
}
return;
}
template <typename T>
__global__ void ASIN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = asin(input[i]);
}
return;
}
template <typename T>
__global__ void ACOS(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = acos(input[i]);
}
return;
}
template <typename T>
__global__ void ATAN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = atan(input[i]);
}
return;
}
template <typename T>
__global__ void LOG1P(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = log(1+input[i]);
}
return;
}
template <typename T>
__global__ void TANH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = tanh(input[i]);
}
return;
}
template <typename T>
__global__ void SIGMOID(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = 1. / (1. + exp(-input[i]));
}
return;
}
template <typename T>
__global__ void EXPM1(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = exp(input[i]) - 1;
}
return;
}
template <typename T>
__global__ void ATANH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = atanh(input[i]);
}
return;
}
template <typename T>
__global__ void ACOSH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = acosh(input[i]);
}
return;
}
template <typename T>
__global__ void SIGN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input[i];
output[i] = x > 0 ? 1 : (x<0 ? -1 : 0);
}
return;
}
template <typename T>
__global__ void COSH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = cosh(input[i]);
}
return;
}
template <typename T>
__global__ void ROUND(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = round(input[i]);
}
return;
}
template <typename T>
__global__ void SINH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = sinh(input[i]);
}
return;
}
template <typename T>
__global__ void ASINH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = asinh(input[i]);
}
return;
}
template <typename T>
__global__ void HARDSWISH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
if (input[i] <= -3) {
output[i] = 0;
} else if (input[i] >= 3) {
output[i] = input[i];
} else {
output[i] = input[i] * (input[i] + 3) / 6;
}
}
return;
}
void callUnary(void *input, void *output, size_t count, MNN::CUDARuntime* runtime, halide_type_t data_type,
MNN::UnaryOpOperation op_type)
{
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num() > count ? count : runtime->threads_num();
#define COMPUTE(TYPE)\
if (op_type == MNN::UnaryOpOperation_##TYPE ) {hipLaunchKernelGGL(( TYPE), dim3(block_num), dim3(threads_num), 0, 0, (float*)input, (float*)output, count); return;};
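    // expand one check per supported op: the first COMPUTE() whose op_type
    // matches launches the corresponding kernel and returns immediately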
COMPUTE(ABS);
COMPUTE(NEG);
COMPUTE(FLOOR);
COMPUTE(CEIL);
COMPUTE(SQUARE);
COMPUTE(SQRT);
COMPUTE(RSQRT);
COMPUTE(EXP);
COMPUTE(LOG);
COMPUTE(SIN);
COMPUTE(COS);
COMPUTE(TAN);
COMPUTE(ASIN);
COMPUTE(ACOS);
COMPUTE(ATAN);
COMPUTE(RECIPROCAL);
COMPUTE(LOG1P);
COMPUTE(TANH);
COMPUTE(SIGMOID);
COMPUTE(EXPM1);
COMPUTE(ACOSH);
COMPUTE(ATANH);
COMPUTE(SIGN);
COMPUTE(COSH);
COMPUTE(ROUND);
COMPUTE(SINH);
COMPUTE(ASINH);
COMPUTE(HARDSWISH);
//case CudaUnaryOpOperation_BNLL:
//case CudaUnaryOpOperation_ERF:
//case CudaUnaryOpOperation_ERFC:
//case CudaUnaryOpOperation_ERFINV:
return;
}
UnaryExecution::UnaryExecution(UnaryOpOperation opType, Backend* backend) : Execution(backend) {
auto cudaBackend = static_cast<CUDABackend*>(backend);
mRuntime = cudaBackend->getCUDARuntime();
mOpType = opType;
}
ErrorCode UnaryExecution::onResize(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
auto shape = inputs[0]->shape();
mCount = CUDABackend::realSize(inputs[0]);
return NO_ERROR;
}
ErrorCode UnaryExecution::onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
#ifdef LOG_VERBOSE
MNN_PRINT("start UnaryExecution onExecute...");
#endif
auto type = inputs[0]->getType();
callUnary((void*)inputs[0]->deviceId(), (void*)outputs[0]->deviceId(), mCount, mRuntime, type, mOpType);
#ifdef LOG_VERBOSE
MNN_PRINT("end UnaryExecution onExecute...");
#endif
return NO_ERROR;
}
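// leaky-ReLU kernel: with slope == 0 this reduces to the standard max(x, 0)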
__global__ void RELU(const float *input, float *output, size_t count, float slope) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float x = input[i];
float y = x > 0 ? x : x * slope;
output[i] = y;
}
return;
}
class ReluExecution : public Execution {
public:
ReluExecution(Backend* bn, float slope) : Execution(bn) {
mSlope = slope;
}
virtual ~ReluExecution() = default;
ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto count = CUDABackend::realSize(inputs[0]);
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto input = inputs[0]->deviceId();
auto output = outputs[0]->deviceId();
hipLaunchKernelGGL(( RELU), dim3(block_num), dim3(threads_num), 0, 0, (float*)input, (float*)output, count, mSlope);
return NO_ERROR;
}
private:
float mSlope;
};
__global__ void CLAMP(const float *input, float *output, size_t count, float minV, float maxV) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float x = input[i];
float y = min(max(x, minV), maxV);
output[i] = y;
}
return;
}
class Relu6Execution : public Execution {
public:
Relu6Execution(Backend* bn, float minV, float maxV) : Execution(bn) {
mMinV = minV;
mMaxV = maxV;
}
virtual ~Relu6Execution() = default;
ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto count = CUDABackend::realSize(inputs[0]);
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto input = inputs[0]->deviceId();
auto output = outputs[0]->deviceId();
hipLaunchKernelGGL(( CLAMP), dim3(block_num), dim3(threads_num), 0, 0, (float*)input, (float*)output, count, mMinV, mMaxV);
return NO_ERROR;
}
private:
float mMinV;
float mMaxV;
};
template <typename T1, typename T2>
__global__ void CAST(T1 *input, T2 *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = (T2)(input[i]);
}
return;
}
__global__ void CASTBOOL(int32_t *input, int32_t *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = input[i] > 0 ? 1 : 0;
}
return;
}
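// map destination types without a dedicated kernel (bool, int64, double)
// onto the int32/float kernels that do exist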
static DataType _mapDataType(DataType src) {
if (DataType_DT_BOOL == src) {
return DataType_DT_INT32;
}
if (DataType_DT_INT64 == src) {
return DataType_DT_INT32;
}
if (DataType_DT_DOUBLE == src) {
return DataType_DT_FLOAT;
}
return src;
}
class CastExecution : public Execution {
public:
CastExecution(Backend* bn, DataType dstType) : Execution(bn) {
mDst = dstType;
}
virtual ~CastExecution() = default;
ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto count = CUDABackend::realSize(inputs[0]);
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto input = inputs[0]->deviceId();
auto output = outputs[0]->deviceId();
auto dstT = _mapDataType(mDst);
const auto &inputDataType = inputs[0]->getType();
if (inputDataType.bytes() == 4 && mDst == MNN::DataType_DT_BOOL) {
hipLaunchKernelGGL(( CASTBOOL), dim3(block_num), dim3(threads_num), 0, 0, (int32_t*)input, (int32_t*)output, count);
} else if (inputs[0]->buffer().type == outputs[0]->buffer().type) {
runtime->memcpy((void*)output, (void*)input, count * inputDataType.bytes(), MNNMemcpyDeviceToDevice, true);
} else if (dstT == MNN::DataType_DT_INT32 && halide_type_of<float>() == inputDataType) {
hipLaunchKernelGGL(( CAST), dim3(block_num), dim3(threads_num), 0, 0, (float*)input, (int*)output, count);
} else if (dstT == MNN::DataType_DT_FLOAT && halide_type_of<int32_t>() == inputDataType) {
hipLaunchKernelGGL(( CAST), dim3(block_num), dim3(threads_num), 0, 0, (int*)input, (float*)output, count);
} else if (dstT == MNN::DataType_DT_FLOAT && halide_type_of<uint8_t>() == inputDataType) {
hipLaunchKernelGGL(( CAST), dim3(block_num), dim3(threads_num), 0, 0, (uint8_t*)input, (float*)output, count);
} else if (dstT == MNN::DataType_DT_FLOAT && halide_type_of<int8_t>() == inputDataType) {
hipLaunchKernelGGL(( CAST), dim3(block_num), dim3(threads_num), 0, 0, (int8_t*)input, (float*)output, count);
} else if (dstT == MNN::DataType_DT_INT8 && halide_type_of<float>() == inputDataType) {
hipLaunchKernelGGL(( CAST), dim3(block_num), dim3(threads_num), 0, 0, (float*)input, (int8_t*)output, count);
} else if (dstT == MNN::DataType_DT_UINT8 && halide_type_of<float>() == inputDataType) {
hipLaunchKernelGGL(( CAST), dim3(block_num), dim3(threads_num), 0, 0, (float*)input, (uint8_t*)output, count);
} else if (dstT == MNN::DataType_DT_UINT8 && halide_type_of<int32_t>() == inputDataType) {
hipLaunchKernelGGL(( CAST), dim3(block_num), dim3(threads_num), 0, 0, (int32_t*)input, (uint8_t*)output, count);
} else if (dstT == MNN::DataType_DT_INT32 && halide_type_of<uint8_t>() == inputDataType) {
hipLaunchKernelGGL(( CAST), dim3(block_num), dim3(threads_num), 0, 0, (uint8_t*)input, (int32_t*)output, count);
} else if (dstT == MNN::DataType_DT_INT32 && halide_type_of<int8_t>() == inputDataType) {
hipLaunchKernelGGL(( CAST), dim3(block_num), dim3(threads_num), 0, 0, (int8_t*)input, (int32_t*)output, count);
}
return NO_ERROR;
}
private:
DataType mDst;
};
class UnaryCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (op->type() == OpType_UnaryOp) {
switch (op->main_as_UnaryOp()->opType()) {
                // Don't support erf functions
case UnaryOpOperation_ERF:
case UnaryOpOperation_ERFC:
case UnaryOpOperation_ERFINV:
return nullptr;
default:
return new UnaryExecution(op->main_as_UnaryOp()->opType(), backend);
}
}
if (op->type() == OpType_Sigmoid) {
return new UnaryExecution(UnaryOpOperation_SIGMOID, backend);
}
if (op->type() == OpType_TanH) {
return new UnaryExecution(UnaryOpOperation_TANH, backend);
}
if (op->type() == OpType_ReLU) {
float slope = 0.0f;
if (nullptr != op->main_as_Relu()) {
slope = op->main_as_Relu()->slope();
}
return new ReluExecution(backend, slope);
}
if (op->type() == OpType_ReLU6) {
float minV = 0.0f;
float maxV = 6.0f;
if (nullptr != op->main()) {
auto p = op->main_as_Relu6();
minV = p->minValue();
maxV = p->maxValue();
}
return new Relu6Execution(backend, minV, maxV);
}
if (op->type() == OpType_Cast) {
return new CastExecution(backend, op->main_as_CastParam()->dstT());
}
return nullptr;
}
};
CUDACreatorRegister<UnaryCreator> __UnaryExecution(OpType_UnaryOp);
CUDACreatorRegister<UnaryCreator> __SigmoidExecution(OpType_Sigmoid);
CUDACreatorRegister<UnaryCreator> __TanhExecution(OpType_TanH);
CUDACreatorRegister<UnaryCreator> __ReluExecution(OpType_ReLU);
CUDACreatorRegister<UnaryCreator> __Relu6Execution(OpType_ReLU6);
CUDACreatorRegister<UnaryCreator> __CastExecution(OpType_Cast);
} // namespace CUDA
} // namespace MNN
| aff2ed79a77ada24099b1883cbf2e954ba2fbd07.cu | //
// UnaryExecution.cpp
// MNN
//
// Created by MNN on 2019/02/28.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "UnaryExecution.hpp"
#include "core/Macro.h"
#include "core/TensorUtils.hpp"
#include "backend/cuda/core/CUDABackend.hpp"
#include <cuda_runtime.h>
namespace MNN {
namespace CUDA {
template <typename T>
__global__ void ABS(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = abs(input[i]);
}
return;
}
template <typename T>
__global__ void EXP(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = exp(input[i]);
}
return;
}
template <typename T>
__global__ void NEG(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = -input[i];
}
return;
}
template <typename T>
__global__ void RECIPROCAL(T *input, T *output, size_t count) {
T one = 1.0;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = one / input[i];
}
return;
}
template <typename T>
__global__ void FLOOR(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = floor(input[i]);
}
}
template <typename T>
__global__ void CEIL(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = ceil(input[i]);
}
}
template <typename T>
__global__ void SQUARE(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = input[i] * input[i];
}
return;
}
template <typename T>
__global__ void SQRT(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = sqrt(input[i]);
}
return;
}
template <typename T>
__global__ void RSQRT(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = rsqrt(input[i]);
}
return;
}
template <typename T>
__global__ void LOG(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = log(input[i]);
}
return;
}
template <typename T>
__global__ void SIN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = sin(input[i]);
}
return;
}
template <typename T>
__global__ void COS(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = cos(input[i]);
}
return;
}
template <typename T>
__global__ void TAN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = tan(input[i]);
}
return;
}
template <typename T>
__global__ void ASIN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = asin(input[i]);
}
return;
}
template <typename T>
__global__ void ACOS(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = acos(input[i]);
}
return;
}
template <typename T>
__global__ void ATAN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = atan(input[i]);
}
return;
}
template <typename T>
__global__ void LOG1P(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = log(1+input[i]);
}
return;
}
template <typename T>
__global__ void TANH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = tanh(input[i]);
}
return;
}
template <typename T>
__global__ void SIGMOID(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = 1. / (1. + exp(-input[i]));
}
return;
}
template <typename T>
__global__ void EXPM1(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = exp(input[i]) - 1;
}
return;
}
template <typename T>
__global__ void ATANH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = atanh(input[i]);
}
return;
}
template <typename T>
__global__ void ACOSH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = acosh(input[i]);
}
return;
}
template <typename T>
__global__ void SIGN(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input[i];
output[i] = x > 0 ? 1 : (x<0 ? -1 : 0);
}
return;
}
template <typename T>
__global__ void COSH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = cosh(input[i]);
}
return;
}
template <typename T>
__global__ void ROUND(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = round(input[i]);
}
return;
}
template <typename T>
__global__ void SINH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = sinh(input[i]);
}
return;
}
template <typename T>
__global__ void ASINH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = asinh(input[i]);
}
return;
}
template <typename T>
__global__ void HARDSWISH(T *input, T *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
if (input[i] <= -3) {
output[i] = 0;
} else if (input[i] >= 3) {
output[i] = input[i];
} else {
output[i] = input[i] * (input[i] + 3) / 6;
}
}
return;
}
void callUnary(void *input, void *output, size_t count, MNN::CUDARuntime* runtime, halide_type_t data_type,
MNN::UnaryOpOperation op_type)
{
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num() > count ? count : runtime->threads_num();
#define COMPUTE(TYPE)\
if (op_type == MNN::UnaryOpOperation_##TYPE ) { TYPE<<<block_num, threads_num>>>((float*)input, (float*)output, count); return;};
COMPUTE(ABS);
COMPUTE(NEG);
COMPUTE(FLOOR);
COMPUTE(CEIL);
COMPUTE(SQUARE);
COMPUTE(SQRT);
COMPUTE(RSQRT);
COMPUTE(EXP);
COMPUTE(LOG);
COMPUTE(SIN);
COMPUTE(COS);
COMPUTE(TAN);
COMPUTE(ASIN);
COMPUTE(ACOS);
COMPUTE(ATAN);
COMPUTE(RECIPROCAL);
COMPUTE(LOG1P);
COMPUTE(TANH);
COMPUTE(SIGMOID);
COMPUTE(EXPM1);
COMPUTE(ACOSH);
COMPUTE(ATANH);
COMPUTE(SIGN);
COMPUTE(COSH);
COMPUTE(ROUND);
COMPUTE(SINH);
COMPUTE(ASINH);
COMPUTE(HARDSWISH);
//case CudaUnaryOpOperation_BNLL:
//case CudaUnaryOpOperation_ERF:
//case CudaUnaryOpOperation_ERFC:
//case CudaUnaryOpOperation_ERFINV:
return;
}
UnaryExecution::UnaryExecution(UnaryOpOperation opType, Backend* backend) : Execution(backend) {
auto cudaBackend = static_cast<CUDABackend*>(backend);
mRuntime = cudaBackend->getCUDARuntime();
mOpType = opType;
}
ErrorCode UnaryExecution::onResize(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
auto shape = inputs[0]->shape();
mCount = CUDABackend::realSize(inputs[0]);
return NO_ERROR;
}
ErrorCode UnaryExecution::onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
#ifdef LOG_VERBOSE
MNN_PRINT("start UnaryExecution onExecute...");
#endif
auto type = inputs[0]->getType();
callUnary((void*)inputs[0]->deviceId(), (void*)outputs[0]->deviceId(), mCount, mRuntime, type, mOpType);
#ifdef LOG_VERBOSE
MNN_PRINT("end UnaryExecution onExecute...");
#endif
return NO_ERROR;
}
__global__ void RELU(const float *input, float *output, size_t count, float slope) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float x = input[i];
float y = x > 0 ? x : x * slope;
output[i] = y;
}
return;
}
class ReluExecution : public Execution {
public:
ReluExecution(Backend* bn, float slope) : Execution(bn) {
mSlope = slope;
}
virtual ~ReluExecution() = default;
ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto count = CUDABackend::realSize(inputs[0]);
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto input = inputs[0]->deviceId();
auto output = outputs[0]->deviceId();
RELU<<<block_num, threads_num>>>((float*)input, (float*)output, count, mSlope);
return NO_ERROR;
}
private:
float mSlope;
};
__global__ void CLAMP(const float *input, float *output, size_t count, float minV, float maxV) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float x = input[i];
float y = min(max(x, minV), maxV);
output[i] = y;
}
return;
}
class Relu6Execution : public Execution {
public:
Relu6Execution(Backend* bn, float minV, float maxV) : Execution(bn) {
mMinV = minV;
mMaxV = maxV;
}
virtual ~Relu6Execution() = default;
ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto count = CUDABackend::realSize(inputs[0]);
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto input = inputs[0]->deviceId();
auto output = outputs[0]->deviceId();
CLAMP<<<block_num, threads_num>>>((float*)input, (float*)output, count, mMinV, mMaxV);
return NO_ERROR;
}
private:
float mMinV;
float mMaxV;
};
template <typename T1, typename T2>
__global__ void CAST(T1 *input, T2 *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = (T2)(input[i]);
}
return;
}
__global__ void CASTBOOL(int32_t *input, int32_t *output, size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
output[i] = input[i] > 0 ? 1 : 0;
}
return;
}
static DataType _mapDataType(DataType src) {
if (DataType_DT_BOOL == src) {
return DataType_DT_INT32;
}
if (DataType_DT_INT64 == src) {
return DataType_DT_INT32;
}
if (DataType_DT_DOUBLE == src) {
return DataType_DT_FLOAT;
}
return src;
}
class CastExecution : public Execution {
public:
CastExecution(Backend* bn, DataType dstType) : Execution(bn) {
mDst = dstType;
}
virtual ~CastExecution() = default;
ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto count = CUDABackend::realSize(inputs[0]);
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto input = inputs[0]->deviceId();
auto output = outputs[0]->deviceId();
auto dstT = _mapDataType(mDst);
const auto &inputDataType = inputs[0]->getType();
if (inputDataType.bytes() == 4 && mDst == MNN::DataType_DT_BOOL) {
CASTBOOL<<<block_num, threads_num>>>((int32_t*)input, (int32_t*)output, count);
} else if (inputs[0]->buffer().type == outputs[0]->buffer().type) {
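            // identical element types: no conversion needed, just a device-to-device copy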
runtime->memcpy((void*)output, (void*)input, count * inputDataType.bytes(), MNNMemcpyDeviceToDevice, true);
} else if (dstT == MNN::DataType_DT_INT32 && halide_type_of<float>() == inputDataType) {
CAST<<<block_num, threads_num>>>((float*)input, (int*)output, count);
} else if (dstT == MNN::DataType_DT_FLOAT && halide_type_of<int32_t>() == inputDataType) {
CAST<<<block_num, threads_num>>>((int*)input, (float*)output, count);
} else if (dstT == MNN::DataType_DT_FLOAT && halide_type_of<uint8_t>() == inputDataType) {
CAST<<<block_num, threads_num>>>((uint8_t*)input, (float*)output, count);
} else if (dstT == MNN::DataType_DT_FLOAT && halide_type_of<int8_t>() == inputDataType) {
CAST<<<block_num, threads_num>>>((int8_t*)input, (float*)output, count);
} else if (dstT == MNN::DataType_DT_INT8 && halide_type_of<float>() == inputDataType) {
CAST<<<block_num, threads_num>>>((float*)input, (int8_t*)output, count);
} else if (dstT == MNN::DataType_DT_UINT8 && halide_type_of<float>() == inputDataType) {
CAST<<<block_num, threads_num>>>((float*)input, (uint8_t*)output, count);
} else if (dstT == MNN::DataType_DT_UINT8 && halide_type_of<int32_t>() == inputDataType) {
CAST<<<block_num, threads_num>>>((int32_t*)input, (uint8_t*)output, count);
} else if (dstT == MNN::DataType_DT_INT32 && halide_type_of<uint8_t>() == inputDataType) {
CAST<<<block_num, threads_num>>>((uint8_t*)input, (int32_t*)output, count);
} else if (dstT == MNN::DataType_DT_INT32 && halide_type_of<int8_t>() == inputDataType) {
CAST<<<block_num, threads_num>>>((int8_t*)input, (int32_t*)output, count);
}
return NO_ERROR;
}
private:
DataType mDst;
};
class UnaryCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (op->type() == OpType_UnaryOp) {
switch (op->main_as_UnaryOp()->opType()) {
// Don't support erf functions
case UnaryOpOperation_ERF:
case UnaryOpOperation_ERFC:
case UnaryOpOperation_ERFINV:
return nullptr;
default:
return new UnaryExecution(op->main_as_UnaryOp()->opType(), backend);
}
}
if (op->type() == OpType_Sigmoid) {
return new UnaryExecution(UnaryOpOperation_SIGMOID, backend);
}
if (op->type() == OpType_TanH) {
return new UnaryExecution(UnaryOpOperation_TANH, backend);
}
if (op->type() == OpType_ReLU) {
float slope = 0.0f;
if (nullptr != op->main_as_Relu()) {
slope = op->main_as_Relu()->slope();
}
return new ReluExecution(backend, slope);
}
if (op->type() == OpType_ReLU6) {
float minV = 0.0f;
float maxV = 6.0f;
if (nullptr != op->main()) {
auto p = op->main_as_Relu6();
minV = p->minValue();
maxV = p->maxValue();
}
return new Relu6Execution(backend, minV, maxV);
}
if (op->type() == OpType_Cast) {
return new CastExecution(backend, op->main_as_CastParam()->dstT());
}
return nullptr;
}
};
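// Note: the registrations below route several op types (unary ops, sigmoid, tanh,
// relu/relu6 and cast) through this single creator, which dispatches on op->type()
// as shown above.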
CUDACreatorRegister<UnaryCreator> __UnaryExecution(OpType_UnaryOp);
CUDACreatorRegister<UnaryCreator> __SigmoidExecution(OpType_Sigmoid);
CUDACreatorRegister<UnaryCreator> __TanhExecution(OpType_TanH);
CUDACreatorRegister<UnaryCreator> __ReluExecution(OpType_ReLU);
CUDACreatorRegister<UnaryCreator> __Relu6Execution(OpType_ReLU6);
CUDACreatorRegister<UnaryCreator> __CastExecution(OpType_Cast);
} // namespace CUDA
} // namespace MNN
|
4285e1326e6d3361057d182fce96c7827a9e7a19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-------------------------------------------------------------------------
*
* MATLAB MEX functions for TV image denoising. Check inputs and parses
* MATLAB data to C++ data.
*
*
* CODE by Imanol Luengo
* PhD student University of Nottingham
* [email protected]
* 2015
* Modified by Ander Biguri for multi-GPU
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
#define MAXTREADS 1024
#define MAX_BUFFER 60
#define BLOCK_SIZE 10 // BLOCK_SIZE^3 must be smaller than MAXTREADS
#include "tvdenoising.hpp"
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
hipDeviceReset();\
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:TVdenoising",hipGetErrorString(__err));\
} \
} while (0)
__device__ __inline__
float divergence(const float* pz, const float* py, const float* px,
long z, long y, long x, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float _div = 0.0f;
if ( z - 1 >= 0 ) {
_div += (pz[idx] - pz[(z-1)*size2d + y*cols + x]) / dz;
} else {
_div += pz[idx];
}
if ( y - 1 >= 0 ) {
_div += (py[idx] - py[z*size2d + (y-1)*cols + x]) / dy;
} else {
_div += py[idx];
}
if ( x - 1 >= 0 ) {
_div += (px[idx] - px[z*size2d + y*cols + (x-1)]) / dx;
} else {
_div += px[idx];
}
return _div;
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z + 1 < depth ) {
grad[0] = (u[(z+1)*size2d + y*cols + x] - uidx) / dz;
}
if ( y + 1 < rows ) {
grad[1] = (u[z*size2d + (y+1)*cols + x] - uidx) / dy;
}
if ( x + 1 < cols ) {
grad[2] = (u[z*size2d + y*cols + (x+1)] - uidx) / dx;
}
}
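// Illustrative note: divergence() and gradient() above are forward-difference
// operators with matching boundary handling, so the divergence acts as the
// (negative) adjoint of the gradient in the discrete primal-dual TV scheme below.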
__global__
void update_u(const float* f, const float* pz, const float* py, const float* px, float* u,
float tau, float lambda,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float _div = divergence(pz, py, px, z, y, x, depth, rows, cols, dz, dy, dx);
u[idx] = u[idx] * (1.0f - tau) + tau * (f[idx] + (1.0f/lambda) * _div);
}
__global__
void update_p(const float* u, float* pz, float* py, float* px,
float tau, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float grad[3] = {0,0,0}, q[3];
gradient(u, grad, z, y, x, depth, rows, cols, dz, dy, dx);
q[0] = pz[idx] + tau * grad[0];
q[1] = py[idx] + tau * grad[1];
q[2] = px[idx] + tau * grad[2];
float norm = fmaxf(1.0f, sqrtf(q[0] * q[0] + q[1] * q[1] + q[2] * q[2]));
pz[idx] = q[0] / norm;
py[idx] = q[1] / norm;
px[idx] = q[2] / norm;
}
// Main function
void tvdenoising(float* src, float* dst, float lambda,
const float* spacing, const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown otherwise)
int dev;
char * devicenames;
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicenames,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
devicenames=deviceProp.name;
}
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// 5% of free memory should be enough; we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=1;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
if(mem_GPU_global> 5*mem_size_image+5*mem_slice_image*buffer_length*2){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*( (image_size[2]+deviceCount-1)/deviceCount + buffer_length*2);
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image it is around 28 Mbytes), let's for now assume we need it all
size_t mem_free=mem_GPU_global;
splits=(unsigned int)(ceil(((float)(5*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each split should have 2 more slices, to account for overlap of images.
// Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 5*mem_img_each_GPU){
// one more split should do the job, as it's an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here?
mem_free=mem_GPU_global-(5*mem_img_each_GPU);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/5; // we need double whatever this results in, rounded down.
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// Assert
if (mem_GPU_global< 5*mem_img_each_GPU){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","Bad assert. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
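// Illustrative note: the factor 5 used in the sizing logic above corresponds to
// the five per-GPU device buffers allocated later (d_src, d_u, d_px, d_py, d_pz),
// each holding slices_per_split + 2*buffer_length slices.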
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
if (isHostRegisterSupported & splits>1){
hipHostRegister(src ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
hipHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Lets allocate auxiliary variables.
float* buffer_u, *buffer_px, *buffer_py, *buffer_pz;
float* h_px, *h_py, *h_pz, *h_u;
if(splits>1){
//These take A LOT of memory and A LOT of time to use. If we can avoid using them, better.
if (buffer_length<maxIter){ // if we do only 1 big iter, they are not needed.
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:Memory","TV dneoising requires 5 times the image memory. Your GPU(s) do not have the required memory.\n This memory will be attempted to allocate on the CPU, Whic may fail or slow the computation by a very significant amount.\n If you want to kill the execution: CTRL+C");
hipHostMalloc((void**)&h_px,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
hipHostMalloc((void**)&h_py,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
hipHostMalloc((void**)&h_pz,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
}
h_u=dst;
}else{
hipHostMalloc((void**)&buffer_u, image_size[0]*image_size[1]*sizeof(float));
hipHostMalloc((void**)&buffer_px,image_size[0]*image_size[1]*sizeof(float));
hipHostMalloc((void**)&buffer_py,image_size[0]*image_size[1]*sizeof(float));
hipHostMalloc((void**)&buffer_pz,image_size[0]*image_size[1]*sizeof(float));
}
// We should be good to go memory-wise.
float** d_src =(float**)malloc(deviceCount*sizeof(float*));
float** d_u =(float**)malloc(deviceCount*sizeof(float*));
float** d_px =(float**)malloc(deviceCount*sizeof(float*));
float** d_py =(float**)malloc(deviceCount*sizeof(float*));
float** d_pz =(float**)malloc(deviceCount*sizeof(float*));
//Malloc
for(dev=0;dev<deviceCount;dev++){
hipSetDevice(dev);
// F
hipMalloc((void**)&d_src[dev], mem_img_each_GPU);
// U
hipMalloc((void**)&d_u [dev], mem_img_each_GPU);
// PX
hipMalloc((void**)&d_px[dev], mem_img_each_GPU);
// PY
hipMalloc((void**)&d_py[dev], mem_img_each_GPU);
// PZ
hipMalloc((void**)&d_pz[dev], mem_img_each_GPU);
}
hipDeviceSynchronize();
cudaCheckErrors("Malloc error");
// Create streams
int nStream_device=5;
int nStreams=deviceCount*nStream_device;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
hipStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// Allocate CPU buffer if needed, warn user if not.
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
float tau2, tau1;
for(unsigned int i=0;i<maxIter;i+=(buffer_length)){
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute the whole image. The ordering of these loops
// needs to be like this due to the bounding layers between splits. If more than 1 split is needed
// for each GPU then there is no other way than taking the entire memory out of the GPU and putting it back.
// If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory.
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
// Check if its the first or last chunck
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// let's compute where we start copies and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
// Precompute indices and needed bytes
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemcpyAsync(d_src[dev]+offset_device[dev], src+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
// All these are async
hipMemcpyAsync(d_u[dev] +offset_device[dev], d_src[dev]+offset_device[dev], bytes_device[dev]*sizeof(float), hipMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
hipMemsetAsync(d_px[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
hipMemsetAsync(d_py[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
hipMemsetAsync(d_pz[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
}
// we need all the stream to finish
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Memcpy failure");
}
// if we need to split and it's not the first iteration, then we need to copy from Host memory.
// d_src is the original image, with no change.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+1]);
hipMemcpyAsync(d_u [dev] +offset_device[dev], h_u +offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+2]);
hipMemcpyAsync(d_px[dev]+offset_device[dev], h_px+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+2]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+3]);
hipMemcpyAsync(d_py[dev] +offset_device[dev], h_py+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+3]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+4]);
hipMemcpyAsync(d_pz[dev] +offset_device[dev], h_pz+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+4]);
}
for (dev = 0; dev < deviceCount; dev++){
hipStreamSynchronize(stream[dev*nStream_device+1]);
hipMemcpyAsync(d_src[dev]+offset_device[dev], src +offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
cudaCheckErrors("Memcpy failure on multi split");
}
}
// Inner iterations.
for(unsigned int ib=0; (ib<(buffer_length)) && ((i+ib)<maxIter); ib++){
tau2 = 0.3f + 0.02f * (i+ib);
tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+(i+ib))));
// bdim and gdim
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
hipLaunchKernelGGL(( update_u), dim3(grid), dim3(block),0,stream[dev*nStream_device], d_src[dev], d_pz[dev], d_py[dev], d_px[dev], d_u[dev], tau1, lambda,
(long)(curr_slices+buffer_length*2), image_size[1],image_size[0],
spacing[2], spacing[1], spacing[0]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
hipLaunchKernelGGL(( update_p), dim3(grid), dim3(block),0,stream[dev*nStream_device], d_u[dev], d_pz[dev], d_py[dev], d_px[dev], tau2,
(long)(curr_slices+buffer_length*2), image_size[1], image_size[0],
spacing[2], spacing[1], spacing[0]);
}
}// END internal iter
// Synchronize mathematics, make sure bounding pixels are correct
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
if (dev<deviceCount-1){
// U
hipSetDevice(dev+1);
hipMemcpyAsync(buffer_u , d_u[dev+1] , buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+1]);
hipMemcpyAsync(buffer_px, d_px[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+2]);
hipMemcpyAsync(buffer_py, d_py[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+3]);
hipMemcpyAsync(buffer_pz, d_pz[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+4]);
hipSetDevice(dev);
hipStreamSynchronize(stream[(dev+1)*nStream_device+1]);
hipMemcpyAsync(d_u[dev] +slices_per_split+buffer_pixels, buffer_u , buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+2]);
hipMemcpyAsync(d_px[dev]+slices_per_split+buffer_pixels, buffer_px, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+3]);
hipMemcpyAsync(d_py[dev]+slices_per_split+buffer_pixels, buffer_py, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+4]);
hipMemcpyAsync(d_pz[dev]+slices_per_split+buffer_pixels, buffer_pz, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
hipDeviceSynchronize();
if (dev>0){
// U
hipSetDevice(dev-1);
hipMemcpyAsync(buffer_u, d_u[dev-1] +slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+1]);
hipMemcpyAsync(buffer_px, d_px[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+2]);
hipMemcpyAsync(buffer_py, d_py[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+3]);
hipMemcpyAsync(buffer_pz, d_pz[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+4]);
hipSetDevice(dev);
hipStreamSynchronize(stream[(dev-1)*nStream_device+1]);
hipMemcpyAsync(d_u[dev] ,buffer_u , buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+2]);
hipMemcpyAsync(d_px[dev],buffer_px, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+3]);
hipMemcpyAsync(d_py[dev],buffer_py, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+4]);
hipMemcpyAsync(d_pz[dev],buffer_pz, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpyAsync(&h_u[linear_idx_start], d_u [dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
if ((i+buffer_length)<maxIter){ // If it's the last iteration, we don't need to get these out.
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpyAsync(&h_px[linear_idx_start], d_px[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+2]);
hipMemcpyAsync(&h_py[linear_idx_start], d_py[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+3]);
hipMemcpyAsync(&h_pz[linear_idx_start], d_pz[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+4]);
}
}
}
}//END splits
}//END main iter
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("TV minimization");
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpyAsync(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_u[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
hipFree(d_src[dev]);
hipFree(d_u [dev]);
hipFree(d_pz[dev]);
hipFree(d_py[dev]);
hipFree(d_px[dev]);
}
if(splits>1 && buffer_length<maxIter){
hipHostFree(h_px);
hipHostFree(h_py);
hipHostFree(h_pz);
}else if(splits==1){
hipHostFree(buffer_u);
hipHostFree(buffer_px);
hipHostFree(buffer_py);
hipHostFree(buffer_pz);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
if (isHostRegisterSupported & splits>1){
hipHostUnregister(src);
hipHostUnregister(dst);
}
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Copy free ");
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
| 4285e1326e6d3361057d182fce96c7827a9e7a19.cu | /*-------------------------------------------------------------------------
*
* MATLAB MEX functions for TV image denoising. Check inputs and parses
* MATLAB data to C++ data.
*
*
* CODE by Imanol Luengo
* PhD student University of Nottingham
* [email protected]
* 2015
* Modified by Ander Biguri for multi-GPU
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
#define MAXTREADS 1024
#define MAX_BUFFER 60
#define BLOCK_SIZE 10 // BLOCK_SIZE^3 must be smaller than MAXTREADS
#include "tvdenoising.hpp"
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
cudaDeviceReset();\
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:TVdenoising",cudaGetErrorString(__err));\
} \
} while (0)
__device__ __inline__
float divergence(const float* pz, const float* py, const float* px,
long z, long y, long x, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float _div = 0.0f;
if ( z - 1 >= 0 ) {
_div += (pz[idx] - pz[(z-1)*size2d + y*cols + x]) / dz;
} else {
_div += pz[idx];
}
if ( y - 1 >= 0 ) {
_div += (py[idx] - py[z*size2d + (y-1)*cols + x]) / dy;
} else {
_div += py[idx];
}
if ( x - 1 >= 0 ) {
_div += (px[idx] - px[z*size2d + y*cols + (x-1)]) / dx;
} else {
_div += px[idx];
}
return _div;
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z + 1 < depth ) {
grad[0] = (u[(z+1)*size2d + y*cols + x] - uidx) / dz;
}
if ( y + 1 < rows ) {
grad[1] = (u[z*size2d + (y+1)*cols + x] - uidx) / dy;
}
if ( x + 1 < cols ) {
grad[2] = (u[z*size2d + y*cols + (x+1)] - uidx) / dx;
}
}
__global__
void update_u(const float* f, const float* pz, const float* py, const float* px, float* u,
float tau, float lambda,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float _div = divergence(pz, py, px, z, y, x, depth, rows, cols, dz, dy, dx);
u[idx] = u[idx] * (1.0f - tau) + tau * (f[idx] + (1.0f/lambda) * _div);
}
__global__
void update_p(const float* u, float* pz, float* py, float* px,
float tau, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float grad[3] = {0,0,0}, q[3];
gradient(u, grad, z, y, x, depth, rows, cols, dz, dy, dx);
q[0] = pz[idx] + tau * grad[0];
q[1] = py[idx] + tau * grad[1];
q[2] = px[idx] + tau * grad[2];
float norm = fmaxf(1.0f, sqrtf(q[0] * q[0] + q[1] * q[1] + q[2] * q[2]));
pz[idx] = q[0] / norm;
py[idx] = q[1] / norm;
px[idx] = q[2] / norm;
}
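// Illustrative note: update_p performs a gradient-ascent step on the dual
// variable (pz, py, px) followed by a pointwise projection onto the unit ball
// (the fmaxf(1, |q|) normalisation above), as in Chambolle-type TV minimisation.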
// Main function
void tvdenoising(float* src, float* dst, float lambda,
const float* spacing, const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown otherwise)
int dev;
char * devicenames;
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicenames,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
devicenames=deviceProp.name;
}
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// 5% of free memory should be enough; we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=1;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
if(mem_GPU_global> 5*mem_size_image+5*mem_slice_image*buffer_length*2){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*( (image_size[2]+deviceCount-1)/deviceCount + buffer_length*2);
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image it is around 28 Mbytes), let's for now assume we need it all
size_t mem_free=mem_GPU_global;
splits=(unsigned int)(ceil(((float)(5*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each split should have 2 more slices, to account for overlap of images.
// Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 5*mem_img_each_GPU){
// one more split should do the job, as it's an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here?
mem_free=mem_GPU_global-(5*mem_img_each_GPU);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/5; // we need double whatever this results in, rounded down.
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// Assert
if (mem_GPU_global< 5*mem_img_each_GPU){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","Bad assert. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
if (isHostRegisterSupported & splits>1){
cudaHostRegister(src ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
cudaHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Lets allocate auxiliary variables.
float* buffer_u, *buffer_px, *buffer_py, *buffer_pz;
float* h_px, *h_py, *h_pz, *h_u;
if(splits>1){
//These take A LOT of memory and A LOT of time to use. If we can avoid using them, better.
if (buffer_length<maxIter){ // if we do only 1 big iter, they are not needed.
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:Memory","TV dneoising requires 5 times the image memory. Your GPU(s) do not have the required memory.\n This memory will be attempted to allocate on the CPU, Whic may fail or slow the computation by a very significant amount.\n If you want to kill the execution: CTRL+C");
cudaMallocHost((void**)&h_px,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
cudaMallocHost((void**)&h_py,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
cudaMallocHost((void**)&h_pz,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
}
h_u=dst;
}else{
cudaMallocHost((void**)&buffer_u, image_size[0]*image_size[1]*sizeof(float));
cudaMallocHost((void**)&buffer_px,image_size[0]*image_size[1]*sizeof(float));
cudaMallocHost((void**)&buffer_py,image_size[0]*image_size[1]*sizeof(float));
cudaMallocHost((void**)&buffer_pz,image_size[0]*image_size[1]*sizeof(float));
}
// We should be good to go memory-wise.
float** d_src =(float**)malloc(deviceCount*sizeof(float*));
float** d_u =(float**)malloc(deviceCount*sizeof(float*));
float** d_px =(float**)malloc(deviceCount*sizeof(float*));
float** d_py =(float**)malloc(deviceCount*sizeof(float*));
float** d_pz =(float**)malloc(deviceCount*sizeof(float*));
//Malloc
for(dev=0;dev<deviceCount;dev++){
cudaSetDevice(dev);
// F
cudaMalloc((void**)&d_src[dev], mem_img_each_GPU);
// U
cudaMalloc((void**)&d_u [dev], mem_img_each_GPU);
// PX
cudaMalloc((void**)&d_px[dev], mem_img_each_GPU);
// PY
cudaMalloc((void**)&d_py[dev], mem_img_each_GPU);
// PZ
cudaMalloc((void**)&d_pz[dev], mem_img_each_GPU);
}
cudaDeviceSynchronize();
cudaCheckErrors("Malloc error");
// Create streams
int nStream_device=5;
int nStreams=deviceCount*nStream_device;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
cudaStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// Allocate CPU buffer if needed, warn user if not.
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
float tau2, tau1;
for(unsigned int i=0;i<maxIter;i+=(buffer_length)){
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute the whole image. The ordering of these loops
// needs to be like this due to the bounding layers between splits. If more than 1 split is needed
// for each GPU then there is no other way than taking the entire memory out of the GPU and putting it back.
// If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory.
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
// Check if its the first or last chunck
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// let's compute where we start copies and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
// Precompute indices and needed bytes
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemcpyAsync(d_src[dev]+offset_device[dev], src+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
// All these are async
cudaMemcpyAsync(d_u[dev] +offset_device[dev], d_src[dev]+offset_device[dev], bytes_device[dev]*sizeof(float), cudaMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
cudaMemsetAsync(d_px[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
cudaMemsetAsync(d_py[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
cudaMemsetAsync(d_pz[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
}
// we need all the stream to finish
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Memcpy failure");
}
// if we need to split and it's not the first iteration, then we need to copy from Host memory.
// d_src is the original image, with no change.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+1]);
cudaMemcpyAsync(d_u [dev] +offset_device[dev], h_u +offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+2]);
cudaMemcpyAsync(d_px[dev]+offset_device[dev], h_px+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+2]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+3]);
cudaMemcpyAsync(d_py[dev] +offset_device[dev], h_py+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+3]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev] +offset_device[dev], h_pz+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+4]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaStreamSynchronize(stream[dev*nStream_device+1]);
cudaMemcpyAsync(d_src[dev]+offset_device[dev], src +offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
cudaCheckErrors("Memcpy failure on multi split");
}
}
// Inner iterations.
for(unsigned int ib=0; (ib<(buffer_length)) && ((i+ib)<maxIter); ib++){
tau2 = 0.3f + 0.02f * (i+ib);
tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+(i+ib))));
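// Illustrative reading of the two step sizes above: tau2 (dual step) increases
// linearly with the iteration count, while tau1 (primal relaxation) is scaled
// by 1/tau2 and therefore shrinks as the iterations progress.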
// bdim and gdim
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
update_u<<<grid, block,0,stream[dev*nStream_device]>>>(d_src[dev], d_pz[dev], d_py[dev], d_px[dev], d_u[dev], tau1, lambda,
(long)(curr_slices+buffer_length*2), image_size[1],image_size[0],
spacing[2], spacing[1], spacing[0]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
update_p<<<grid, block,0,stream[dev*nStream_device]>>>(d_u[dev], d_pz[dev], d_py[dev], d_px[dev], tau2,
(long)(curr_slices+buffer_length*2), image_size[1], image_size[0],
spacing[2], spacing[1], spacing[0]);
}
}// END internal iter
// Synchronize mathematics, make sure bounding pixels are correct
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
if (dev<deviceCount-1){
// U
cudaSetDevice(dev+1);
cudaMemcpyAsync(buffer_u , d_u[dev+1] , buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+1]);
cudaMemcpyAsync(buffer_px, d_px[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+2]);
cudaMemcpyAsync(buffer_py, d_py[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+3]);
cudaMemcpyAsync(buffer_pz, d_pz[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+4]);
cudaSetDevice(dev);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+1]);
cudaMemcpyAsync(d_u[dev] +slices_per_split+buffer_pixels, buffer_u , buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+2]);
cudaMemcpyAsync(d_px[dev]+slices_per_split+buffer_pixels, buffer_px, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+3]);
cudaMemcpyAsync(d_py[dev]+slices_per_split+buffer_pixels, buffer_py, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev]+slices_per_split+buffer_pixels, buffer_pz, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
cudaDeviceSynchronize();
if (dev>0){
// U
cudaSetDevice(dev-1);
cudaMemcpyAsync(buffer_u, d_u[dev-1] +slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+1]);
cudaMemcpyAsync(buffer_px, d_px[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+2]);
cudaMemcpyAsync(buffer_py, d_py[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+3]);
cudaMemcpyAsync(buffer_pz, d_pz[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+4]);
cudaSetDevice(dev);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+1]);
cudaMemcpyAsync(d_u[dev] ,buffer_u , buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+2]);
cudaMemcpyAsync(d_px[dev],buffer_px, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+3]);
cudaMemcpyAsync(d_py[dev],buffer_py, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev],buffer_pz, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpyAsync(&h_u[linear_idx_start], d_u [dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
if ((i+buffer_length)<maxIter){ // If it's the last iteration, we don't need to get these out.
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpyAsync(&h_px[linear_idx_start], d_px[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+2]);
cudaMemcpyAsync(&h_py[linear_idx_start], d_py[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+3]);
cudaMemcpyAsync(&h_pz[linear_idx_start], d_pz[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+4]);
}
}
}
}//END splits
}//END main iter
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("TV minimization");
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpyAsync(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_u[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
cudaFree(d_src[dev]);
cudaFree(d_u [dev]);
cudaFree(d_pz[dev]);
cudaFree(d_py[dev]);
cudaFree(d_px[dev]);
}
if(splits>1 && buffer_length<maxIter){
cudaFreeHost(h_px);
cudaFreeHost(h_py);
cudaFreeHost(h_pz);
}else if(splits==1){
cudaFreeHost(buffer_u);
cudaFreeHost(buffer_px);
cudaFreeHost(buffer_py);
cudaFreeHost(buffer_pz);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]) ;
if (isHostRegisterSupported & splits>1){
cudaHostUnregister(src);
cudaHostUnregister(dst);
}
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Copy free ");
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
a2fc81ab4321bc85ec947b9bf644b8563cac1799.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
int main() {
hipLaunchKernelGGL(( cuda_hello), dim3(1),dim3(1), 0, 0, );
// Wait for the kernel to finish so the device-side printf output is flushed before the program exits.
hipDeviceSynchronize();
return 0;
}
a2fc81ab4321bc85ec947b9bf644b8563cac1799.cu | #include <cstdio>
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
int main() {
cuda_hello<<<1,1>>>();
// Wait for the kernel to finish so the device-side printf output is flushed before the program exits.
cudaDeviceSynchronize();
return 0;
}
|
d43438eee1c80c6df64c60d6cb02ae066d98d705.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pointwise_hist1.cuh"
#include "split_properties_helpers.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int InnerHistBitsCount,
int BlockSize>
struct TPointHistOneByte {
volatile float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 8 >> InnerHistBitsCount;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2)));
return warpOffset + innerHistStart;
}
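// Worked example (illustrative), for InnerHistBitsCount == 0 and threadIdx.x == 37:
// warpOffset = 1024 * (37 / 32) = 1024, blocks = 8, and
// innerHistStart = 37 & (7 << 2) = 4, giving a slice offset of 1028.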
__forceinline__ __device__ TPointHistOneByte(float* buff) {
const int HIST_SIZE = 32 * BlockSize;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BlockSize) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__device__ void AddPoint(ui32 ci, const float t) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
short f = (threadIdx.x + i) & 3;
int bin = bfe(ci, 24 - 8 * f, 8);
const float statToAdd = (bin >> (5 + InnerHistBitsCount)) == 0 ? t : 0;
const int mask = (1 << InnerHistBitsCount) - 1;
const int higherBin = (bin >> 5) & mask;
int offset = 4 * higherBin + f + ((bin & 31) << 5);
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
if (pass == higherBin) {
Buffer[offset] += statToAdd;
}
}
} else {
syncTile.sync();
Buffer[offset] += statToAdd;
}
}
}
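// Illustrative note: the (threadIdx.x + i) & 3 rotation above makes the four
// threads of a quad start from different byte features, and the syncTile passes
// serialise threads that target the same higher-bin slice, so concurrent writes
// within a warp land on distinct shared-memory histogram slots.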
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
//12 iterations
#pragma unroll 12
for (int i = start; i < 32 * BlockSize; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
//now we have only 1024 entries hist
const int warpHistBlockCount = 8 >> InnerHistBitsCount;
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
float sum[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] = 0.0f;
}
if (fold < histSize) {
const int warpHistSize = 1024;
const int lowerBitsOffset = (fold & 31) << 5;
const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1);
const int blockSize = 4 * (1 << InnerHistBitsCount);
const volatile float* src = Buffer + warpHistSize + lowerBitsOffset + 4 * higherBin;
#pragma unroll
for (int block = 0; block < warpHistBlockCount; ++block) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] += src[i + block * blockSize];
}
}
}
__syncthreads();
if (fold < histSize) {
for (int i = 0; i < 4; ++i) {
Buffer[histSize * i + fold] = sum[i];
}
}
__syncthreads();
}
};
template <int BlockSize>
struct TPointHistHalfByte {
volatile float* Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 512 * (threadIdx.x / 32);
const int innerHistStart = threadIdx.x & 24;
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistHalfByte(float* buff) {
const int histSize = 16 * BlockSize;
for (int i = threadIdx.x; i < histSize; i += BlockSize) {
buff[i] = 0;
}
__syncthreads();
Buffer = buff + SliceOffset();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t) {
thread_block_tile<8> addToHistTile = tiled_partition<8>(this_thread_block());
#pragma unroll 4
for (int i = 0; i < 8; i++) {
const int f = (threadIdx.x + i) & 7;
short bin = bfe(ci, 28 - 4 * f, 4);
bin <<= 5;
bin += f;
Buffer[bin] += t;
addToHistTile.sync();
}
}
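// Illustrative note: each 32-bit ci packs eight 4-bit bins (one per feature);
// the loop above visits all eight features, staggering the starting feature by
// threadIdx.x and syncing the 8-thread tile between steps so concurrent writes
// within a tile hit distinct histogram slots.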
__device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int HIST_SIZE = 16 * BlockSize;
float sum = 0;
if (threadIdx.x < 512) {
for (int i = threadIdx.x; i < HIST_SIZE; i += 512) {
sum += Buffer[i];
}
}
__syncthreads();
if (threadIdx.x < 512) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
const int fold = (threadIdx.x >> 3) & 15;
float sum = 0.0f;
if (threadIdx.x < 128) {
const int featureId = threadIdx.x & 7;
#pragma unroll
for (int group = 0; group < 4; ++group) {
sum += Buffer[32 * fold + featureId + 8 * group];
}
}
__syncthreads();
if (threadIdx.x < 128) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
};
template <int StripeSize, int OuterUnroll, int N, typename THist>
__forceinline__ __device__ void ComputeHistogram(int BlocksPerFeature, const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target,
const ui32* __restrict__ cindex,
float* result) {
target += offset;
indices += offset;
THist hist(result);
int i = (threadIdx.x & 31) + (threadIdx.x / 32) * 32;
//all operations should be warp-aligned
// first: the first warp makes memory accesses aligned. It loads the first 32 - offset % 32 elements.
{
int lastId = min(dsSize, 32 - (offset & 31));
if ((blockIdx.x % BlocksPerFeature) == 0) {
const int index = i < lastId ? __ldg(indices + i) : 0;
const ui32 ci = i < lastId ? __ldg(cindex + index) : 0;
const float wt = i < lastId ? __ldg(target + i) : 0;
hist.AddPoint(ci, wt);
}
dsSize = max(dsSize - lastId, 0);
indices += lastId;
target += lastId;
}
// now let's align the end
const int unalignedTail = (dsSize & 31);
if (unalignedTail != 0) {
if ((blockIdx.x % BlocksPerFeature) == 0)
{
const int tailOffset = dsSize - unalignedTail;
const int index = i < unalignedTail ? __ldg(indices + tailOffset + i) : 0;
const ui32 ci = i < unalignedTail ? __ldg(cindex + index) : 0;
const float wt = i < unalignedTail ? __ldg(target + tailOffset + i) : 0;
hist.AddPoint(ci, wt);
}
}
dsSize -= unalignedTail;
if (blockIdx.x % BlocksPerFeature == 0 && dsSize <= 0) {
__syncthreads();
hist.Reduce();
return;
}
indices += (blockIdx.x % BlocksPerFeature) * StripeSize;
target += (blockIdx.x % BlocksPerFeature) * StripeSize;
dsSize = max(dsSize - (blockIdx.x % BlocksPerFeature) * StripeSize, 0);
const int stripe = StripeSize * BlocksPerFeature;
if (dsSize) {
int iteration_count = (dsSize - i + (stripe - 1)) / stripe;
int blocked_iteration_count = ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N;
target += i;
indices += i;
#pragma unroll OuterUnroll
for (int j = 0; j < blocked_iteration_count; ++j) {
ui32 local_index[N];
#pragma unroll
for (int k = 0; k < N; k++) {
local_index[k] = __ldg(indices + stripe * k);
}
ui32 local_ci[N];
float local_wt[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
local_ci[k] = __ldg(cindex + local_index[k]);
local_wt[k] = __ldg(target + stripe * k);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
hist.AddPoint(local_ci[k], local_wt[k]);
}
indices += stripe * N;
target += stripe * N;
}
for (int k = blocked_iteration_count * N; k < iteration_count; ++k) {
const int index = __ldg(indices);
ui32 ci = __ldg(cindex + index);
float wt = __ldg(target);
hist.AddPoint(ci, wt);
indices += stripe;
target += stripe;
}
__syncthreads();
hist.Reduce();
}
}
template <int StripeSize, int OuterUnroll, typename THist>
__forceinline__ __device__ void ComputeHistogram64BitLoads(int BlocksPerFeature, const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target,
const ui32* __restrict__ cindex,
float* result) {
target += offset;
indices += offset;
THist hist(result);
if (dsSize) {
            //first: the first warp makes memory accesses aligned. it loads the first 32 - offset % 32 elements.
{
int lastId = min(dsSize, 128 - (offset & 127));
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 ) * 32;
if ((blockIdx.x % BlocksPerFeature) == 0)
{
for (; (colId < 128); colId += blockDim.x)
{
const int index = colId < lastId ? __ldg(indices + colId) : 0;
const ui32 ci = colId < lastId ? __ldg(cindex + index) : 0;
const float wt = colId < lastId ? __ldg(target + colId) : 0;
hist.AddPoint(ci, wt);
}
}
dsSize = max(dsSize - lastId, 0);
indices += lastId;
target += lastId;
}
//now lets align end
const int unalignedTail = (dsSize & 63);
if (unalignedTail != 0) {
if ((blockIdx.x % BlocksPerFeature) == 0)
{
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 ) * 32;
const int tailOffset = dsSize - unalignedTail;
for (; (colId < 64); colId += blockDim.x) {
const int index = colId < unalignedTail ? __ldg(indices + tailOffset + colId) : 0;
const ui32 ci = colId < unalignedTail ? __ldg(cindex + index) : 0;
const float wt = colId < unalignedTail ? __ldg(target + tailOffset + colId) : 0;
hist.AddPoint(ci, wt);
}
}
}
dsSize -= unalignedTail;
if (dsSize <= 0) {
if ((blockIdx.x % BlocksPerFeature) == 0) {
__syncthreads();
hist.Reduce();
}
return;
}
indices += (blockIdx.x % BlocksPerFeature) * StripeSize * 2;
target += (blockIdx.x % BlocksPerFeature) * StripeSize * 2;
const int stripe = StripeSize * BlocksPerFeature * 2;
dsSize = max(dsSize - (blockIdx.x % BlocksPerFeature) * StripeSize * 2, 0);
if (dsSize) {
int iterCount;
{
const int i = 2 * ((threadIdx.x & 31) + (threadIdx.x / 32) * 32);
target += i;
indices += i;
iterCount = (dsSize - i + (stripe - 1)) / stripe;
}
#pragma unroll OuterUnroll
for (int j = 0; j < iterCount; ++j) {
const uint2 localIndices = __ldg((uint2*) indices);
const ui32 firstBin = __ldg(cindex + localIndices.x);
const ui32 secondBin = __ldg(cindex + localIndices.y);
const float2 localTarget = __ldg((float2* )(target));
hist.AddPoint(firstBin, localTarget.x);
hist.AddPoint(secondBin, localTarget.y);
indices += stripe;
target += stripe;
}
__syncthreads();
hist.Reduce();
}
}
}
template <int BlockSize,
int InnerHistBitsCount,
bool Use64BitLoads>
__forceinline__ __device__ void ComputeSplitPropertiesPass(int BlocksPerFeature, const TCFeature* __restrict__ feature,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* __restrict__ smem) {
using THist = TPointHistOneByte<InnerHistBitsCount, BlockSize>;
if (Use64BitLoads) {
#if __CUDA_ARCH__ < 300
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int outerUnroll = 2;
#else
const int outerUnroll = InnerHistBitsCount == 0 ? 4 : 2;
#endif
const int size = partition->Size;
const int offset = partition->Offset;
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist > (BlocksPerFeature,
indices,
offset,
size,
target,
cindex,
smem);
} else {
#if __CUDA_ARCH__ < 300
const int innerUnroll = InnerHistBitsCount == 0 ? 4 : 2;
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = InnerHistBitsCount == 0 ? 8 : 4;
const int outerUnroll = 2;
#else
const int innerUnroll = 4;
const int outerUnroll = 2;
#endif
ComputeHistogram<BlockSize, outerUnroll, innerUnroll, THist>(BlocksPerFeature,
indices,
partition->Offset,
partition->Size,
target,
cindex,
smem);
}
__syncthreads();
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
#pragma unroll 4
for (int fid = 0; fid < fCount; ++fid) {
if (fold < feature[fid].Folds) {
const float val = smem[fid * histSize + fold];
if (abs(val) > 1e-20f) {
if (BlocksPerFeature > 1) {
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold), val);
} else {
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold), val);
}
}
}
}
}
#define DECLARE_PASS(I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BlockSize, I, USE_64_BIT_LOAD>(M, feature, cindex, target, indices, partition, fCount, binSums, &counters[0]);
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BlockSize, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(int M, const TCFeature* __restrict__ feature, int fCount,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BlockSize];
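  // shared-memory histogram storage: TPointHistOneByte keeps 1024 floats per warp
  // (BlockSize / 32 warps => 32 * BlockSize floats) and zeroes them in its constructor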
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]);
__syncthreads();
  //CatBoost always uses direct loads on the first pass of histogram calculation, and for this step 64-bit loads are almost 2x faster
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = IsFullPass;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(1, M, false);
} else if (maxBinCount <= 128) {
DECLARE_PASS(2, M, false);
} else {
DECLARE_PASS(3, M, false);
}
}
}
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(int M,
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 * BlockSize];
if (partition->Size) {
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = IsFullPass;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int outerUnroll = 1;
#else
const int outerUnroll = 1;
#endif
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist > (M, indices, partition->Offset, partition->Size, target, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int innerUnroll = 2;
const int outerUnroll = 1;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = 4;
const int outerUnroll = 1;
#else
const int innerUnroll = 1;
const int outerUnroll = 1;
#endif
ComputeHistogram < BlockSize, outerUnroll, innerUnroll, THist > (M, indices,
partition->Offset,
partition->Size,
target,
cindex,
&counters[0]);
}
ui32 fid = threadIdx.x;
if (fid < fCount) {
const int groupId = fid / 4;
const int fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
            #pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
sum += counters[8 * i + groupId];
}
}
if (abs(sum) > 1e-20f) {
if (M > 1) {
atomicAdd(binSums + feature[fid].FirstFoldIndex, sum);
} else {
binSums[feature[fid].FirstFoldIndex] = sum;
}
}
}
}
}
template <int BlockSize,
int BlocksPerFeatureCount>
inline void RunComputeHist1NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const ui32* indices,
const TDataPartition* partition,
float* binSums,
const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesNBImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount,
nbFeatures, nbCount, cindex, target,
indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesNBImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>( BlocksPerFeatureCount,
nbFeatures, nbCount, cindex, target,
indices, partition, binSums, binFeatureCount
);
}
}
template <int BlockSize, int BlocksPerFeatureCount>
void RunComputeHist1BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const ui32* indices,
const TDataPartition* partition,
float* binSums,
int histLineSize,
bool fullPass,
TCudaStream stream,
dim3 numBlocks) {
if (fullPass) {
ComputeSplitPropertiesBImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount, bFeatures, bCount, cindex, target, indices, partition, binSums, histLineSize);
} else {
ComputeSplitPropertiesBImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount, bFeatures, bCount, cindex, target, indices, partition, binSums, histLineSize);
}
};
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesHalfByteImpl(
int M,
const TCFeature* __restrict__ feature, int fCount,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 8;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 8, 8);
__shared__ float smem[16 * BlockSize];
if (partition->Size) {
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = IsFullPass;
#else
const bool use64BitLoad = false;
#endif
if (use64BitLoad) {
#if __CUDA_ARCH__ <= 350
const int outerUnroll = 2;
#else
const int outerUnroll = 1;
#endif
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist >(M, indices, partition->Offset, partition->Size, target, cindex, &smem[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int innerUnroll = 2;
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = 4;
const int outerUnroll = 2;
#else
const int innerUnroll = 1;
const int outerUnroll = 1;
#endif
ComputeHistogram < BlockSize, outerUnroll, innerUnroll, THist > (M, indices, partition->Offset, partition->Size, target, cindex, &smem[0]);
}
__syncthreads();
const int fid = threadIdx.x >> 4;
const int fold = threadIdx.x & 15;
if (fid < fCount && fold < feature[fid].Folds) {
const float result = smem[fold * 8 + fid];
if (abs(result) > 1e-20) {
if (M > 1) {
atomicAdd(binSums + feature[fid].FirstFoldIndex + fold, result);
} else {
binSums[feature[fid].FirstFoldIndex + fold] = result;
}
}
}
}
}
template <int BlockSize,
int BlocksPerFeatureCount>
inline void RunComputeHist1HalfByteKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target,
const ui32* indices,
const TDataPartition* partition,
float* binSums,
const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesHalfByteImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(
BlocksPerFeatureCount, nbFeatures, nbCount, cindex, target, indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesHalfByteImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>(
BlocksPerFeatureCount, nbFeatures, nbCount, cindex, target, indices, partition, binSums, binFeatureCount);
}
}
void ComputeHist1Binary(const TCFeature* bFeatures, ui32 bCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partsCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (bCount) {
#define COMPUTE(k) \
RunComputeHist1BinaryKernel<blockSize, k>(bFeatures, bCount, cindex, target, indices, \
                                              partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8);
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
void ComputeHist1HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partsCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8);
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = static_cast<ui32>(histCount);
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (halfByteFeaturesCount) {
#define COMPUTE(k)\
RunComputeHist1HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\
target,\
indices, partition, binSums, histLineSize,\
fullPass,\
stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
void ComputeHist1NonBinary(const TCFeature* nbFeatures, ui32 nbCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
if (nbCount) {
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const int histCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
#define COMPUTE(k) \
RunComputeHist1NonBinaryKernel<blockSize, k>(nbFeatures, nbCount, cindex, target, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
}
| d43438eee1c80c6df64c60d6cb02ae066d98d705.cu | #include "pointwise_hist1.cuh"
#include "split_properties_helpers.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int InnerHistBitsCount,
int BlockSize>
struct TPointHistOneByte {
volatile float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 8 >> InnerHistBitsCount;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistOneByte(float* buff) {
const int HIST_SIZE = 32 * BlockSize;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BlockSize) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__device__ void AddPoint(ui32 ci, const float t) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
short f = (threadIdx.x + i) & 3;
int bin = bfe(ci, 24 - 8 * f, 8);
const float statToAdd = (bin >> (5 + InnerHistBitsCount)) == 0 ? t : 0;
const int mask = (1 << InnerHistBitsCount) - 1;
const int higherBin = (bin >> 5) & mask;
int offset = 4 * higherBin + f + ((bin & 31) << 5);
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
if (pass == higherBin) {
Buffer[offset] += statToAdd;
}
}
} else {
syncTile.sync();
Buffer[offset] += statToAdd;
}
}
}
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
//12 iterations
#pragma unroll 12
for (int i = start; i < 32 * BlockSize; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
        //now we have only a 1024-entry histogram
const int warpHistBlockCount = 8 >> InnerHistBitsCount;
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
float sum[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] = 0.0f;
}
if (fold < histSize) {
const int warpHistSize = 1024;
const int lowerBitsOffset = (fold & 31) << 5;
const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1);
const int blockSize = 4 * (1 << InnerHistBitsCount);
const volatile float* src = Buffer + warpHistSize + lowerBitsOffset + 4 * higherBin;
#pragma unroll
for (int block = 0; block < warpHistBlockCount; ++block) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] += src[i + block * blockSize];
}
}
}
__syncthreads();
if (fold < histSize) {
for (int i = 0; i < 4; ++i) {
Buffer[histSize * i + fold] = sum[i];
}
}
__syncthreads();
}
};
template <int BlockSize>
struct TPointHistHalfByte {
volatile float* Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 512 * (threadIdx.x / 32);
const int innerHistStart = threadIdx.x & 24;
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistHalfByte(float* buff) {
const int histSize = 16 * BlockSize;
for (int i = threadIdx.x; i < histSize; i += BlockSize) {
buff[i] = 0;
}
__syncthreads();
Buffer = buff + SliceOffset();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t) {
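            // ci packs eight 4-bit feature bins; every thread of the 8-wide tile
            // visits all eight features, staggering the starting feature by
            // threadIdx.x so concurrent shared-memory updates are spread out.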
thread_block_tile<8> addToHistTile = tiled_partition<8>(this_thread_block());
#pragma unroll 4
for (int i = 0; i < 8; i++) {
const int f = (threadIdx.x + i) & 7;
short bin = bfe(ci, 28 - 4 * f, 4);
bin <<= 5;
bin += f;
Buffer[bin] += t;
addToHistTile.sync();
}
}
__device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int HIST_SIZE = 16 * BlockSize;
float sum = 0;
if (threadIdx.x < 512) {
for (int i = threadIdx.x; i < HIST_SIZE; i += 512) {
sum += Buffer[i];
}
}
__syncthreads();
if (threadIdx.x < 512) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
const int fold = (threadIdx.x >> 3) & 15;
float sum = 0.0f;
if (threadIdx.x < 128) {
const int featureId = threadIdx.x & 7;
#pragma unroll
for (int group = 0; group < 4; ++group) {
sum += Buffer[32 * fold + featureId + 8 * group];
}
}
__syncthreads();
if (threadIdx.x < 128) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
};
template <int StripeSize, int OuterUnroll, int N, typename THist>
__forceinline__ __device__ void ComputeHistogram(int BlocksPerFeature, const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target,
const ui32* __restrict__ cindex,
float* result) {
target += offset;
indices += offset;
THist hist(result);
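        // note: i == threadIdx.x, written as lane + 32 * warp id to emphasize warp-aligned indexing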
int i = (threadIdx.x & 31) + (threadIdx.x / 32) * 32;
//all operations should be warp-aligned
        //first: the first warp makes memory accesses aligned. it loads the first 32 - offset % 32 elements.
{
int lastId = min(dsSize, 32 - (offset & 31));
if ((blockIdx.x % BlocksPerFeature) == 0) {
const int index = i < lastId ? __ldg(indices + i) : 0;
const ui32 ci = i < lastId ? __ldg(cindex + index) : 0;
const float wt = i < lastId ? __ldg(target + i) : 0;
hist.AddPoint(ci, wt);
}
dsSize = max(dsSize - lastId, 0);
indices += lastId;
target += lastId;
}
//now lets align end
const int unalignedTail = (dsSize & 31);
if (unalignedTail != 0) {
if ((blockIdx.x % BlocksPerFeature) == 0)
{
const int tailOffset = dsSize - unalignedTail;
const int index = i < unalignedTail ? __ldg(indices + tailOffset + i) : 0;
const ui32 ci = i < unalignedTail ? __ldg(cindex + index) : 0;
const float wt = i < unalignedTail ? __ldg(target + tailOffset + i) : 0;
hist.AddPoint(ci, wt);
}
}
dsSize -= unalignedTail;
if (blockIdx.x % BlocksPerFeature == 0 && dsSize <= 0) {
__syncthreads();
hist.Reduce();
return;
}
indices += (blockIdx.x % BlocksPerFeature) * StripeSize;
target += (blockIdx.x % BlocksPerFeature) * StripeSize;
dsSize = max(dsSize - (blockIdx.x % BlocksPerFeature) * StripeSize, 0);
const int stripe = StripeSize * BlocksPerFeature;
if (dsSize) {
int iteration_count = (dsSize - i + (stripe - 1)) / stripe;
int blocked_iteration_count = ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N;
target += i;
indices += i;
#pragma unroll OuterUnroll
for (int j = 0; j < blocked_iteration_count; ++j) {
ui32 local_index[N];
#pragma unroll
for (int k = 0; k < N; k++) {
local_index[k] = __ldg(indices + stripe * k);
}
ui32 local_ci[N];
float local_wt[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
local_ci[k] = __ldg(cindex + local_index[k]);
local_wt[k] = __ldg(target + stripe * k);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
hist.AddPoint(local_ci[k], local_wt[k]);
}
indices += stripe * N;
target += stripe * N;
}
for (int k = blocked_iteration_count * N; k < iteration_count; ++k) {
const int index = __ldg(indices);
ui32 ci = __ldg(cindex + index);
float wt = __ldg(target);
hist.AddPoint(ci, wt);
indices += stripe;
target += stripe;
}
__syncthreads();
hist.Reduce();
}
}
template <int StripeSize, int OuterUnroll, typename THist>
__forceinline__ __device__ void ComputeHistogram64BitLoads(int BlocksPerFeature, const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target,
const ui32* __restrict__ cindex,
float* result) {
target += offset;
indices += offset;
THist hist(result);
if (dsSize) {
            //first: the first warp makes memory accesses aligned. it loads the first 32 - offset % 32 elements.
{
int lastId = min(dsSize, 128 - (offset & 127));
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 ) * 32;
if ((blockIdx.x % BlocksPerFeature) == 0)
{
for (; (colId < 128); colId += blockDim.x)
{
const int index = colId < lastId ? __ldg(indices + colId) : 0;
const ui32 ci = colId < lastId ? __ldg(cindex + index) : 0;
const float wt = colId < lastId ? __ldg(target + colId) : 0;
hist.AddPoint(ci, wt);
}
}
dsSize = max(dsSize - lastId, 0);
indices += lastId;
target += lastId;
}
//now lets align end
const int unalignedTail = (dsSize & 63);
if (unalignedTail != 0) {
if ((blockIdx.x % BlocksPerFeature) == 0)
{
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 ) * 32;
const int tailOffset = dsSize - unalignedTail;
for (; (colId < 64); colId += blockDim.x) {
const int index = colId < unalignedTail ? __ldg(indices + tailOffset + colId) : 0;
const ui32 ci = colId < unalignedTail ? __ldg(cindex + index) : 0;
const float wt = colId < unalignedTail ? __ldg(target + tailOffset + colId) : 0;
hist.AddPoint(ci, wt);
}
}
}
dsSize -= unalignedTail;
if (dsSize <= 0) {
if ((blockIdx.x % BlocksPerFeature) == 0) {
__syncthreads();
hist.Reduce();
}
return;
}
indices += (blockIdx.x % BlocksPerFeature) * StripeSize * 2;
target += (blockIdx.x % BlocksPerFeature) * StripeSize * 2;
const int stripe = StripeSize * BlocksPerFeature * 2;
dsSize = max(dsSize - (blockIdx.x % BlocksPerFeature) * StripeSize * 2, 0);
if (dsSize) {
int iterCount;
{
const int i = 2 * ((threadIdx.x & 31) + (threadIdx.x / 32) * 32);
target += i;
indices += i;
iterCount = (dsSize - i + (stripe - 1)) / stripe;
}
#pragma unroll OuterUnroll
for (int j = 0; j < iterCount; ++j) {
const uint2 localIndices = __ldg((uint2*) indices);
const ui32 firstBin = __ldg(cindex + localIndices.x);
const ui32 secondBin = __ldg(cindex + localIndices.y);
const float2 localTarget = __ldg((float2* )(target));
hist.AddPoint(firstBin, localTarget.x);
hist.AddPoint(secondBin, localTarget.y);
indices += stripe;
target += stripe;
}
__syncthreads();
hist.Reduce();
}
}
}
template <int BlockSize,
int InnerHistBitsCount,
bool Use64BitLoads>
__forceinline__ __device__ void ComputeSplitPropertiesPass(int BlocksPerFeature, const TCFeature* __restrict__ feature,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* __restrict__ smem) {
using THist = TPointHistOneByte<InnerHistBitsCount, BlockSize>;
if (Use64BitLoads) {
#if __CUDA_ARCH__ < 300
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int outerUnroll = 2;
#else
const int outerUnroll = InnerHistBitsCount == 0 ? 4 : 2;
#endif
const int size = partition->Size;
const int offset = partition->Offset;
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist > (BlocksPerFeature,
indices,
offset,
size,
target,
cindex,
smem);
} else {
#if __CUDA_ARCH__ < 300
const int innerUnroll = InnerHistBitsCount == 0 ? 4 : 2;
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = InnerHistBitsCount == 0 ? 8 : 4;
const int outerUnroll = 2;
#else
const int innerUnroll = 4;
const int outerUnroll = 2;
#endif
ComputeHistogram<BlockSize, outerUnroll, innerUnroll, THist>(BlocksPerFeature,
indices,
partition->Offset,
partition->Size,
target,
cindex,
smem);
}
__syncthreads();
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
#pragma unroll 4
for (int fid = 0; fid < fCount; ++fid) {
if (fold < feature[fid].Folds) {
const float val = smem[fid * histSize + fold];
if (abs(val) > 1e-20f) {
if (BlocksPerFeature > 1) {
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold), val);
} else {
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold), val);
}
}
}
}
}
#define DECLARE_PASS(I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BlockSize, I, USE_64_BIT_LOAD>(M, feature, cindex, target, indices, partition, fCount, binSums, &counters[0]);
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BlockSize, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(int M, const TCFeature* __restrict__ feature, int fCount,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BlockSize];
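  // shared-memory histogram storage: TPointHistOneByte keeps 1024 floats per warp
  // (BlockSize / 32 warps => 32 * BlockSize floats) and zeroes them in its constructor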
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]);
__syncthreads();
  //CatBoost always uses direct loads on the first pass of histogram calculation, and for this step 64-bit loads are almost 2x faster
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = IsFullPass;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(1, M, false);
} else if (maxBinCount <= 128) {
DECLARE_PASS(2, M, false);
} else {
DECLARE_PASS(3, M, false);
}
}
}
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(int M,
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 * BlockSize];
if (partition->Size) {
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = IsFullPass;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int outerUnroll = 1;
#else
const int outerUnroll = 1;
#endif
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist > (M, indices, partition->Offset, partition->Size, target, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int innerUnroll = 2;
const int outerUnroll = 1;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = 4;
const int outerUnroll = 1;
#else
const int innerUnroll = 1;
const int outerUnroll = 1;
#endif
ComputeHistogram < BlockSize, outerUnroll, innerUnroll, THist > (M, indices,
partition->Offset,
partition->Size,
target,
cindex,
&counters[0]);
}
ui32 fid = threadIdx.x;
if (fid < fCount) {
const int groupId = fid / 4;
const int fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
            #pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
sum += counters[8 * i + groupId];
}
}
if (abs(sum) > 1e-20f) {
if (M > 1) {
atomicAdd(binSums + feature[fid].FirstFoldIndex, sum);
} else {
binSums[feature[fid].FirstFoldIndex] = sum;
}
}
}
}
}
template <int BlockSize,
int BlocksPerFeatureCount>
inline void RunComputeHist1NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const ui32* indices,
const TDataPartition* partition,
float* binSums,
const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesNBImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount,
nbFeatures, nbCount, cindex, target,
indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesNBImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>( BlocksPerFeatureCount,
nbFeatures, nbCount, cindex, target,
indices, partition, binSums, binFeatureCount
);
}
}
template <int BlockSize, int BlocksPerFeatureCount>
void RunComputeHist1BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const ui32* indices,
const TDataPartition* partition,
float* binSums,
int histLineSize,
bool fullPass,
TCudaStream stream,
dim3 numBlocks) {
if (fullPass) {
ComputeSplitPropertiesBImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount, bFeatures, bCount, cindex, target, indices, partition, binSums, histLineSize);
} else {
ComputeSplitPropertiesBImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount, bFeatures, bCount, cindex, target, indices, partition, binSums, histLineSize);
}
};
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesHalfByteImpl(
int M,
const TCFeature* __restrict__ feature, int fCount,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 8;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 8, 8);
__shared__ float smem[16 * BlockSize];
if (partition->Size) {
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = IsFullPass;
#else
const bool use64BitLoad = false;
#endif
if (use64BitLoad) {
#if __CUDA_ARCH__ <= 350
const int outerUnroll = 2;
#else
const int outerUnroll = 1;
#endif
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist >(M, indices, partition->Offset, partition->Size, target, cindex, &smem[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int innerUnroll = 2;
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = 4;
const int outerUnroll = 2;
#else
const int innerUnroll = 1;
const int outerUnroll = 1;
#endif
ComputeHistogram < BlockSize, outerUnroll, innerUnroll, THist > (M, indices, partition->Offset, partition->Size, target, cindex, &smem[0]);
}
__syncthreads();
const int fid = threadIdx.x >> 4;
const int fold = threadIdx.x & 15;
if (fid < fCount && fold < feature[fid].Folds) {
const float result = smem[fold * 8 + fid];
if (abs(result) > 1e-20) {
if (M > 1) {
atomicAdd(binSums + feature[fid].FirstFoldIndex + fold, result);
} else {
binSums[feature[fid].FirstFoldIndex + fold] = result;
}
}
}
}
}
template <int BlockSize,
int BlocksPerFeatureCount>
inline void RunComputeHist1HalfByteKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target,
const ui32* indices,
const TDataPartition* partition,
float* binSums,
const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesHalfByteImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(
BlocksPerFeatureCount, nbFeatures, nbCount, cindex, target, indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesHalfByteImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>(
BlocksPerFeatureCount, nbFeatures, nbCount, cindex, target, indices, partition, binSums, binFeatureCount);
}
}
void ComputeHist1Binary(const TCFeature* bFeatures, ui32 bCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partsCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (bCount) {
#define COMPUTE(k) \
RunComputeHist1BinaryKernel<blockSize, k>(bFeatures, bCount, cindex, target, indices, \
                                              partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8);
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
void ComputeHist1HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partsCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8);
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = static_cast<ui32>(histCount);
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (halfByteFeaturesCount) {
#define COMPUTE(k)\
RunComputeHist1HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\
target,\
indices, partition, binSums, histLineSize,\
fullPass,\
stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
void ComputeHist1NonBinary(const TCFeature* nbFeatures, ui32 nbCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
if (nbCount) {
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const int histCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
#define COMPUTE(k) \
RunComputeHist1NonBinaryKernel<blockSize, k>(nbFeatures, nbCount, cindex, target, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
}
|
185f5c4cece5e2b5e3f24a9df03c978383a83d00.hip | // !!! This is a file automatically generated by hipify!!!
/********************************************************************************************
source Code : deviceDetails.cu
Objective : Example code to demonstrate the number of devices that are present on the
current system and their properties
Description: To query using the cuda API calls about the various properties of the devices like
the device model,max number of threads per block, compute capability,warp size,
available Global, shared, and constant memories etc.
input: none
output: The various properties of all the devices that are present on the current system
**********************************************************************************************/
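// Example build/run (assumed): hipcc deviceDetails.cu -o deviceDetails && ./deviceDetails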
#include <hip/hip_runtime.h>
#include<stdio.h>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// main routine to find the GPU devices that are present on the system
// querying the various details of all the devices that are present and printing the details
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc,char* argv[])
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
if (device == 0)
{
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
printf("\n\nThere is no device supporting CUDA.\n");
break;
}
else
printf("\n\nThere are %d device(s) supporting CUDA\n",deviceCount);
}
printf("\n\n********************* DEVICE-%d DETAILS *******************\n",device);
printf("The name of the device : %s\n",deviceProp.name);
printf("The compute capability : %d.%d\n",deviceProp.major,deviceProp.minor);
printf("The warp size : %d\n",deviceProp.warpSize);
printf("The Global memory available on device : %lf GBytes\n",(double)deviceProp.totalGlobalMem/1000000000);
printf("The Constant memory available on device: %ld Bytes\n",deviceProp.totalConstMem);
printf("The shared memory available per Block : %ld Bytes\n",deviceProp.sharedMemPerBlock);
printf("The registers available per Block : %d\n",deviceProp.regsPerBlock);
printf("The number of multiprocessors on the device : %d\n",deviceProp.multiProcessorCount);
printf("The max number of threads per Block : %d\n",deviceProp.maxThreadsPerBlock);
printf("The max sizes of each dimension of a block: (%d,%d,%d)\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]);
printf("The max sizes of each dimension of a grid: (%d,%d,%d)\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]);
printf("----------------------------------------------------------\n\n");
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
| 185f5c4cece5e2b5e3f24a9df03c978383a83d00.cu |
/********************************************************************************************
source Code : deviceDetails.cu
Objective : Example code to demonstrate the number of devices that are present on the
current system and their properties
Description: To query using the cuda API calls about the various properties of the devices like
the device model,max number of threads per block, compute capability,warp size,
available Global, shared, and constant memories etc.
input: none
output: The various properties of all the devices that are present on the current system
**********************************************************************************************/
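// Example build/run (assumed): nvcc deviceDetails.cu -o deviceDetails && ./deviceDetails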
#include <cuda.h>
#include<stdio.h>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// main routine to find the GPU devices that are present on the system
// querying the various details of all the devices that are present and printing the details
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc,char* argv[])
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
if (device == 0)
{
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
printf("\n\nThere is no device supporting CUDA.\n");
break;
}
else
printf("\n\nThere are %d device(s) supporting CUDA\n",deviceCount);
}
printf("\n\n********************* DEVICE-%d DETAILS *******************\n",device);
printf("The name of the device : %s\n",deviceProp.name);
printf("The compute capability : %d.%d\n",deviceProp.major,deviceProp.minor);
printf("The warp size : %d\n",deviceProp.warpSize);
printf("The Global memory available on device : %lf GBytes\n",(double)deviceProp.totalGlobalMem/1000000000);
printf("The Constant memory available on device: %ld Bytes\n",deviceProp.totalConstMem);
printf("The shared memory available per Block : %ld Bytes\n",deviceProp.sharedMemPerBlock);
printf("The registers available per Block : %d\n",deviceProp.regsPerBlock);
printf("The number of multiprocessors on the device : %d\n",deviceProp.multiProcessorCount);
printf("The max number of threads per Block : %d\n",deviceProp.maxThreadsPerBlock);
printf("The max sizes of each dimension of a block: (%d,%d,%d)\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]);
printf("The max sizes of each dimension of a grid: (%d,%d,%d)\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]);
printf("----------------------------------------------------------\n\n");
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
293512e73d54e19e75143eb8e74b2346346a323a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_4_front;
int xdim0_update_halo_kernel2_xvel_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_4_front;
int ydim0_update_halo_kernel2_xvel_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_4_front;
int xdim1_update_halo_kernel2_xvel_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_4_front;
int ydim1_update_halo_kernel2_xvel_plus_4_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_4_front * (y) + \
xdim0_update_halo_kernel2_xvel_plus_4_front * \
ydim0_update_halo_kernel2_xvel_plus_4_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_4_front * (y) + \
xdim1_update_halo_kernel2_xvel_plus_4_front * \
ydim1_update_halo_kernel2_xvel_plus_4_front * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_4_front_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -4)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_4_front(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_front *
ydim0_update_halo_kernel2_xvel_plus_4_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_front *
ydim1_update_halo_kernel2_xvel_plus_4_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_4_front_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_4_front(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 79))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(79, "update_halo_kernel2_xvel_plus_4_front");
OPS_kernels[79].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_4_front_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_4_front_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_4_front_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_4_front_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_4_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_4_front_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_4_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_4_front_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_4_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_4_front_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_4_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
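  // d_m[d] holds the halo depth of the dat in the negative direction of dimension d
  // (plus the intra-block MPI halo when OPS_MPI is defined); it is folded into the
  // base offsets computed below to locate the start of the owned region.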
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_4_front), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[79].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 293512e73d54e19e75143eb8e74b2346346a323a.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_4_front;
int xdim0_update_halo_kernel2_xvel_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_4_front;
int ydim0_update_halo_kernel2_xvel_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_4_front;
int xdim1_update_halo_kernel2_xvel_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_4_front;
int ydim1_update_halo_kernel2_xvel_plus_4_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_4_front * (y) + \
xdim0_update_halo_kernel2_xvel_plus_4_front * \
ydim0_update_halo_kernel2_xvel_plus_4_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_4_front * (y) + \
xdim1_update_halo_kernel2_xvel_plus_4_front * \
ydim1_update_halo_kernel2_xvel_plus_4_front * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_4_front_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -4)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_4_front(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_4_front *
ydim0_update_halo_kernel2_xvel_plus_4_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_4_front *
ydim1_update_halo_kernel2_xvel_plus_4_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_4_front_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_4_front(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 79))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(79, "update_halo_kernel2_xvel_plus_4_front");
OPS_kernels[79].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_4_front_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_4_front_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_4_front_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_4_front_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_4_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_4_front_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_4_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_4_front_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_4_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_4_front_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_4_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
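// base0 (and base1 below) is the byte offset of the range's first element
// within the dat, accounting for the dat base, halo depth (d_m) and stencil
// strides.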
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_plus_4_front<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[79].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
6fe3d3da47b1b35ccb04f5ddf675514832fbaaa4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2010-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <nih/bvh/cuda/sah_builder.h>
#include <nih/sampling/random.h>
#include <nih/time/timer.h>
#include <nih/basic/cuda_domains.h>
namespace nih {
void sah_bvh_test()
{
fprintf(stderr, "sah bvh test... started\n");
const uint32 n_objs = 1024*1024;
const uint32 n_tests = 100;
thrust::host_vector<Bbox4f> h_bboxes( n_objs );
Random random;
for (uint32 i = 0; i < n_objs; ++i)
h_bboxes[i] = Bbox4f( Vector4f( random.next(), random.next(), random.next(), 1.0f ) );
thrust::device_vector<Bbox4f> d_bboxes( h_bboxes );
thrust::device_vector<Bvh_node> bvh_nodes;
thrust::device_vector<uint2> bvh_leaves;
thrust::device_vector<uint32> bvh_index;
cuda::Sah_builder builder( bvh_nodes, bvh_leaves, bvh_index );
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
float time = 0.0f;
for (uint32 i = 0; i <= n_tests; ++i)
{
float dtime;
hipEventRecord( start, 0 );
builder.build(
Bbox3f( Vector3f(0.0f), Vector3f(1.0f) ),
d_bboxes.begin(),
d_bboxes.begin() + n_objs,
4u,
1.8f );
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &dtime, start, stop );
if (i) // skip the first run
time += dtime;
}
time /= 1000.0f * float(n_tests);
hipEventDestroy( start );
hipEventDestroy( stop );
fprintf(stderr, "sah bvh test... done\n");
fprintf(stderr, " time : %f ms\n", time * 1000.0f );
fprintf(stderr, " objs/sec : %f M\n", (n_objs / time) / 1.0e6f );
fprintf(stderr, " nodes : %u\n", builder.m_node_count );
fprintf(stderr, " leaves : %u\n", builder.m_leaf_count );
fprintf(stderr, " levels : %u\n", builder.m_level_count );
fprintf(stderr, " sorting : %f ms\n", builder.m_sorting_time / float(n_tests) );
fprintf(stderr, " compression : %f ms\n", builder.m_compression_time / float(n_tests) );
fprintf(stderr, " sah split : %f ms\n", builder.m_sah_split_time / float(n_tests) );
fprintf(stderr, " distribute objects : %f ms\n", builder.m_distribute_objects_time / float(n_tests) );
fprintf(stderr, " temp storage : %.1f MB\n", float(builder.m_temp_storage) / 1.0e6f );
}
} // namespace nih
| 6fe3d3da47b1b35ccb04f5ddf675514832fbaaa4.cu | /*
* Copyright (c) 2010-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <nih/bvh/cuda/sah_builder.h>
#include <nih/sampling/random.h>
#include <nih/time/timer.h>
#include <nih/basic/cuda_domains.h>
namespace nih {
void sah_bvh_test()
{
fprintf(stderr, "sah bvh test... started\n");
const uint32 n_objs = 1024*1024;
const uint32 n_tests = 100;
thrust::host_vector<Bbox4f> h_bboxes( n_objs );
Random random;
for (uint32 i = 0; i < n_objs; ++i)
h_bboxes[i] = Bbox4f( Vector4f( random.next(), random.next(), random.next(), 1.0f ) );
thrust::device_vector<Bbox4f> d_bboxes( h_bboxes );
thrust::device_vector<Bvh_node> bvh_nodes;
thrust::device_vector<uint2> bvh_leaves;
thrust::device_vector<uint32> bvh_index;
cuda::Sah_builder builder( bvh_nodes, bvh_leaves, bvh_index );
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
float time = 0.0f;
for (uint32 i = 0; i <= n_tests; ++i)
{
float dtime;
cudaEventRecord( start, 0 );
builder.build(
Bbox3f( Vector3f(0.0f), Vector3f(1.0f) ),
d_bboxes.begin(),
d_bboxes.begin() + n_objs,
4u,
1.8f );
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &dtime, start, stop );
if (i) // skip the first run
time += dtime;
}
time /= 1000.0f * float(n_tests);
cudaEventDestroy( start );
cudaEventDestroy( stop );
fprintf(stderr, "sah bvh test... done\n");
fprintf(stderr, " time : %f ms\n", time * 1000.0f );
fprintf(stderr, " objs/sec : %f M\n", (n_objs / time) / 1.0e6f );
fprintf(stderr, " nodes : %u\n", builder.m_node_count );
fprintf(stderr, " leaves : %u\n", builder.m_leaf_count );
fprintf(stderr, " levels : %u\n", builder.m_level_count );
fprintf(stderr, " sorting : %f ms\n", builder.m_sorting_time / float(n_tests) );
fprintf(stderr, " compression : %f ms\n", builder.m_compression_time / float(n_tests) );
fprintf(stderr, " sah split : %f ms\n", builder.m_sah_split_time / float(n_tests) );
fprintf(stderr, " distribute objects : %f ms\n", builder.m_distribute_objects_time / float(n_tests) );
fprintf(stderr, " temp storage : %.1f MB\n", float(builder.m_temp_storage) / 1.0e6f );
}
} // namespace nih
|
ff7ccfe088c347a3db058660df91ceaf30f03245.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// GPU device code
// Using Monte Carlo MRG32k3a
//
#include <stdio.h>
#include <windows.h>
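// sninvdev draws one standard-normal deviate per call: dseed[0..2] and
// dseed[3..5] hold the states of the two MRG32k3a component recurrences,
// their combined uniform is mapped through the inverse normal CDF
// (normcdfinvf), and the result is clamped to +/-100 in the extreme tails.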
__device__ inline float sninvdev(double dseed[])
{
const double norm = 2.328306549295728e-10;
const double m1 = 4294967087.0;
const double m2 = 4294944443.0;
const double a12 = 1403580.0;
const double a13n = 810728.0;
const double a21 = 527612.0;
const double a23n = 1370589.0;
int k;
double p1, p2;
float ans, p;
p1 = a12*dseed[1] - a13n*dseed[2];
k = p1 / m1;
p1 -= k*m1;
if (p1 < 0.0) p1 += m1;
dseed[2] = dseed[1]; dseed[1] = dseed[0]; dseed[0] = p1;
p2 = a21*dseed[3] - a23n*dseed[5];
k = p2 / m2;
p2 -= k*m2;
if (p2 < 0.0) p2 += m2;
dseed[5] = dseed[4]; dseed[4] = dseed[3]; dseed[3] = p2;
if (p1 <= p2) p = ((p1 - p2 + m1)*norm);
else p = ((p1 - p2)*norm);
if (p <= 0.0) {
ans = -100.0f;
}
else {
if (p >= 1.0) ans = 100.0f;
else ans = normcdfinvf(p);
}
return ans;
}
////////////////////////////////////////////////////////////////////////////////
//Process an array of nSim simulations of the SPX model on GPU
////////////////////////////////////////////////////////////////////////////////
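// Each thread simulates one path: x = log(FX) follows
//   dx = (rd - rf - 0.5*v*v)*dt + v*sqrt(dt)*z1,
// while y = log(v*v) follows the AR(1) recursion y <- BCoef*y + ACoef + sigCoef*z2,
// with corr(z1, z2) = rho; exp(x) is stored into d_SimFX at each of the nMat
// maturity dates.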
__global__ void SimulatePathGPU(
int nSim, int nMat, int nSteps, float dt, float sqdt, float FX0, float v0,
float BCoef, float ACoef, float sigCoef, float rho, unsigned int *seeds,
int *Maturity, float *rd, float *rf, float *d_SimFX)
{
int iSim;
//Thread index
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
iSim = tid;
if (iSim < nSim) {
double dseed[6];
int i, j, k, jj, kk, jstart, jend;
float x, y, v, vnew, vavg, rhosq, z1, z2;
// extract seeds for this path from seeds
for (i = 0; i < 6; i++) dseed[i] = seeds[i + iSim * 6];
x = log(FX0);
v = v0;
y = log(v0*v0);
rhosq = sqrt(1.0 - rho * rho);
jstart = 1;
for (j = 1; j <= nMat; j++) {
jend = Maturity[j];
for (jj = jstart; jj <= jend; jj++) {
for (kk = 1; kk <= nSteps; kk++) {
z2 = sninvdev(dseed);
z1 = rho * z2 + rhosq * sninvdev(dseed);
// Simulate y
y = BCoef * y + ACoef + sigCoef * z2;
//yavg = 0.5*(ynew + y);
//y = ynew;
vnew = exp(0.5*y);
//vavg = 0.5*(v + vnew);
x += (rd[jj] - rf[jj] - 0.5*v*v)*dt + v * sqdt*z1;
v = vnew;
} // end of loop on kk for time steps within a day
} // End of loop on jj
d_SimFX[iSim*(nMat+1)+j] = exp(x);
jstart = jend + 1;
} // End of loop on j for nMat maturities
}
}
| ff7ccfe088c347a3db058660df91ceaf30f03245.cu | // GPU device code
// Using Monte Carlo MRG32k3a
//
#include <stdio.h>
#include <windows.h>
__device__ inline float sninvdev(double dseed[])
{
const double norm = 2.328306549295728e-10;
const double m1 = 4294967087.0;
const double m2 = 4294944443.0;
const double a12 = 1403580.0;
const double a13n = 810728.0;
const double a21 = 527612.0;
const double a23n = 1370589.0;
int k;
double p1, p2;
float ans, p;
p1 = a12*dseed[1] - a13n*dseed[2];
k = p1 / m1;
p1 -= k*m1;
if (p1 < 0.0) p1 += m1;
dseed[2] = dseed[1]; dseed[1] = dseed[0]; dseed[0] = p1;
p2 = a21*dseed[3] - a23n*dseed[5];
k = p2 / m2;
p2 -= k*m2;
if (p2 < 0.0) p2 += m2;
dseed[5] = dseed[4]; dseed[4] = dseed[3]; dseed[3] = p2;
if (p1 <= p2) p = ((p1 - p2 + m1)*norm);
else p = ((p1 - p2)*norm);
if (p <= 0.0) {
ans = -100.0f;
}
else {
if (p >= 1.0) ans = 100.0f;
else ans = normcdfinvf(p);
}
return ans;
}
////////////////////////////////////////////////////////////////////////////////
//Process an array of nSim simulations of the SPX model on GPU
////////////////////////////////////////////////////////////////////////////////
__global__ void SimulatePathGPU(
int nSim, int nMat, int nSteps, float dt, float sqdt, float FX0, float v0,
float BCoef, float ACoef, float sigCoef, float rho, unsigned int *seeds,
int *Maturity, float *rd, float *rf, float *d_SimFX)
{
int iSim;
//Thread index
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
iSim = tid;
if (iSim < nSim) {
double dseed[6];
int i, j, k, jj, kk, jstart, jend;
float x, y, v, vnew, vavg, rhosq, z1, z2;
// extract seeds for this path from seeds
for (i = 0; i < 6; i++) dseed[i] = seeds[i + iSim * 6];
x = log(FX0);
v = v0;
y = log(v0*v0);
rhosq = sqrt(1.0 - rho * rho);
jstart = 1;
for (j = 1; j <= nMat; j++) {
jend = Maturity[j];
for (jj = jstart; jj <= jend; jj++) {
for (kk = 1; kk <= nSteps; kk++) {
z2 = sninvdev(dseed);
z1 = rho * z2 + rhosq * sninvdev(dseed);
// Simulate y
y = BCoef * y + ACoef + sigCoef * z2;
//yavg = 0.5*(ynew + y);
//y = ynew;
vnew = exp(0.5*y);
//vavg = 0.5*(v + vnew);
x += (rd[jj] - rf[jj] - 0.5*v*v)*dt + v * sqdt*z1;
v = vnew;
} // end of loop on kk for time steps within a day
} // End of loop on jj
d_SimFX[iSim*(nMat+1)+j] = exp(x);
jstart = jend + 1;
} // End of loop on j for nMat maturities
}
}
|
94028c5407e3fe7c97b8c7a2cf65f5339b0549c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* cg.cu
* Created on: July 22, 2016
* Author: Wei Tan ([email protected])
* CUDA kernels related to batch CG solver used in ALS
* CG solver: https://en.wikipedia.org/wiki/Conjugate_gradient_method
* Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2
*/
#include "als.h"
#include "device_utilities.h"
#include "host_utilities.h"
#include <fstream>
#define SCAN_BATCH 24
#define CG_ERROR 1e-4
#undef DEBUG
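// All kernels below run the textbook CG recurrence per linear system:
//   r = b - A*x; p = r; rsold = r'r
//   repeat (at most cgIter times):
//     Ap = A*p; alpha = rsold / (p'Ap)
//     x += alpha*p; r -= alpha*Ap; rsnew = r'r
//     stop if rsnew < CG_ERROR
//     p = r + (rsnew/rsold)*p; rsold = rsnew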
//CG (iterative solve) kernel
//each block solves a A*x=b
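// Dynamic shared memory layout, (4*f+4) floats per block (see the host
// launches below): [0,f) x, [f,2f) p, [2f,3f) r, [3f,4f) Ap, then the
// scalars rsold, alpha, rsnew, beta.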
__global__ void updateXWithCGKernel(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
extern __shared__ float smem[];
float *sharedx = &smem[0];
float *sharedp = &smem[f];
float *sharedr = &smem[2*f];
float *sharedap = &smem[3*f];
float *rsold = &smem[4*f];
float *alpha = &smem[4*f+1];
float *rsnew = &smem[4*f+2];
float *beta = &smem[4*f+3];
//sharedx<--x
sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x];
__syncthreads();
//r=b-A*x;
float temp = 0;
for(int i = 0; i < f; i++)
//this is mathematically correct and coalesced because A is symmetric
temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedx[i];
sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp;
//p=r;
sharedp[threadIdx.x] = sharedr[threadIdx.x];
//rsold=r'*r;
if(threadIdx.x == 0){
rsold[0] = 0;
}
temp = sharedr[threadIdx.x]
*sharedr[threadIdx.x];
blockReduceSumWithAtomics(rsold, temp);
//temp = blockReduceSum(shared, temp);
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***shared memory content after 1st blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
for(int iter = 0; iter < cgIter; iter++){
//ap=A*p;
//WARN: set temp to zero since the next operation is +=!
temp = 0;
for(int i = 0; i < f; i++)
//this is mathematically correct and coalesced because A is symmetric
temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedp[i];
sharedap[threadIdx.x] = temp;
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("----------CG iteration %d \n", iter);
printf("***ap:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
printf("***shared memory content before 2rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(threadIdx.x == 0){
rsnew[0] = 0;
}
//no need to have sync before blockReduce
//because there is a __syncthreads() in blockReduce
//pAp=p'*Ap
temp = sharedp[threadIdx.x]
*sharedap[threadIdx.x];
//temp = blockReduceSum(shared, temp);
blockReduceSumWithAtomics(rsnew, temp);
//sync needed, to let all atomicAdd threads complete
__syncthreads();
if(threadIdx.x == 0){
//pAp = temp;
//alpha=rsold/(p'*Ap); use rsnew to store pAp
alpha[0] = rsold[0]/rsnew[0];
#ifdef DEBUG
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***pAp:\n");
printf("pAp = %f \n", rsnew[0]);
printf("***alpha:\n");
printf("alpha = %f \n", alpha[0]);
#endif
rsnew[0] = 0;
}
//needed, alpha[0] to be used by all threads
__syncthreads();
//x=x+alpha*p;
sharedx[threadIdx.x] =
sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x];
//r=r-alpha*Ap;
sharedr[threadIdx.x] =
sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x];
//NOT needed?
//__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content before 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
//rsnew=r'*r;
/*
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
temp = blockReduceSum(shared, temp);
__syncthreads();
if(threadIdx.x == 0){
rsnew[0] = temp;
}
*/
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
blockReduceSumWithAtomics(rsnew, temp);
//WARN: has to have this sync!
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsnew:\n");
printf("rsnew = %f \n", rsnew[0]);
printf("***shared memory content after 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(rsnew[0]<CG_ERROR)
break;
//NOT needed?
//__syncthreads();
//beta
if(threadIdx.x == 0){
beta[0] = rsnew[0]/rsold[0];
//rsold=rsnew;
rsold[0] = rsnew[0];
}
//need sync since every thread needs beta[0]
__syncthreads();
//p=r+(rsnew/rsold)*p;
sharedp[threadIdx.x] =
sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x];
//need sync as every thread needs sharedp at the beginning of for
__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content after update p:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
__syncthreads();
#endif
}//end of CG iterations
//x<--sharedx
x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x];
}
//CG (iterative solve) kernel
//each block solves a A*x=b and A in fp16
__global__ void updateXWithCGKernel3(half * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
extern __shared__ float smem[];
float *sharedx = &smem[0];
float *sharedp = &smem[f];
float *sharedr = &smem[2*f];
float *sharedap = &smem[3*f];
float *rsold = &smem[4*f];
float *alpha = &smem[4*f+1];
float *rsnew = &smem[4*f+2];
float *beta = &smem[4*f+3];
//sharedx<--x
sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x];
__syncthreads();
//r=b-A*x;
float temp = 0;
for(int i = 0; i < f; i++)
//this is mathematically correct and coalesced because A is symmetric
temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedx[i];
sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp;
//p=r;
sharedp[threadIdx.x] = sharedr[threadIdx.x];
//rsold=r'*r;
if(threadIdx.x == 0){
rsold[0] = 0;
}
temp = sharedr[threadIdx.x]
*sharedr[threadIdx.x];
blockReduceSumWithAtomics(rsold, temp);
//temp = blockReduceSum(shared, temp);
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***shared memory content after 1st blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
for(int iter = 0; iter < cgIter; iter++){
//ap=A*p;
//WARN: set temp to zero since the next operation is +=!
temp = 0;
for(int i = 0; i < f; i++)
//this is mathematically correct and coalesced because A is symmetric
temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedp[i];
sharedap[threadIdx.x] = temp;
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("----------CG iteration %d \n", iter);
printf("***ap:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
printf("***shared memory content before 2rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(threadIdx.x == 0){
rsnew[0] = 0;
}
//no need to have sync before blockReduce
//because there is a __syncthreads() in blockReduce
//pAp=p'*Ap
temp = sharedp[threadIdx.x]
*sharedap[threadIdx.x];
//temp = blockReduceSum(shared, temp);
blockReduceSumWithAtomics(rsnew, temp);
//sync needed, to let all atomicAdd threads complete
__syncthreads();
if(threadIdx.x == 0){
//pAp = temp;
//alpha=rsold/(p'*Ap); use rsnew to store pAp
alpha[0] = rsold[0]/rsnew[0];
#ifdef DEBUG
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***pAp:\n");
printf("pAp = %f \n", rsnew[0]);
printf("***alpha:\n");
printf("alpha = %f \n", alpha[0]);
#endif
rsnew[0] = 0;
}
//needed, alpha[0] to be used by all threads
__syncthreads();
//x=x+alpha*p;
sharedx[threadIdx.x] =
sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x];
//r=r-alpha*Ap;
sharedr[threadIdx.x] =
sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x];
//NOT needed?
//__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content before 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
//rsnew=r'*r;
/*
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
temp = blockReduceSum(shared, temp);
__syncthreads();
if(threadIdx.x == 0){
rsnew[0] = temp;
}
*/
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
blockReduceSumWithAtomics(rsnew, temp);
//WARN: has to have this sync!
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsnew:\n");
printf("rsnew = %f \n", rsnew[0]);
printf("***shared memory content after 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(rsnew[0]<CG_ERROR)
break;
//NOT needed?
//__syncthreads();
//beta
if(threadIdx.x == 0){
beta[0] = rsnew[0]/rsold[0];
//rsold=rsnew;
rsold[0] = rsnew[0];
}
//need sync since every thread needs beta[0]
__syncthreads();
//p=r+(rsnew/rsold)*p;
sharedp[threadIdx.x] =
sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x];
//need sync as every thread needs sharedp at the beginning of for
__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content after update p:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
__syncthreads();
#endif
}//end of CG iterations
//x<--sharedx
x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x];
}
//blockDim.x=64 or 96 (two or three WARPs) instead of 100 -- WARP shuffle seems to require this
__global__ void updateXWithCGKernel2(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
extern __shared__ float smem[];
float *sharedx = &smem[0];
float *sharedp = &smem[f];
float *sharedr = &smem[2*f];
float *sharedap = &smem[3*f];
float *rsold = &smem[4*f];
float *alpha = &smem[4*f+1];
float *rsnew = &smem[4*f+2];
float *beta = &smem[4*f+3];
//sharedx<--x
for(int k = threadIdx.x; k < f; k += blockDim.x)
sharedx[k] = x[blockIdx.x*f + k];
__syncthreads();
//r=b-A*x;
float temp = 0;
for(int k = threadIdx.x; k < f; k += blockDim.x){
temp = 0;
for(int i = 0; i < f; i++)
temp += A[blockIdx.x*f*f + f*i + k]*sharedx[i];
sharedr[k] = b[blockIdx.x*f + k] - temp;
//p=r;
sharedp[k] = sharedr[k];
}
//rsold=r'*r;
if(threadIdx.x == 0){
rsold[0] = 0;
}
temp = 0;
for(int k = threadIdx.x; k < f; k += blockDim.x){
temp += sharedr[k]*sharedr[k];
}
blockReduceSumWithAtomics(rsold, temp);
//temp = blockReduceSum(shared, temp);
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***shared memory content after 1st blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
for(int iter = 0; iter < cgIter; iter++){
//ap=A*p;
//WARN: set temp to zero since the next operation is +=!
for(int k = threadIdx.x; k < f; k += blockDim.x){
temp = 0;
for(int i = 0; i < f; i++)
temp += A[blockIdx.x*f*f + f*i + k]*sharedp[i];
sharedap[k] = temp;
}
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("----------CG iteration %d \n", iter);
printf("***ap:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
printf("***shared memory content before 2rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(threadIdx.x == 0){
rsnew[0] = 0;
}
//no need to have sync before blockReduce
//because there is a __syncthreads() in blockReduce
//pAp=p'*Ap
temp = 0;
for(int k = threadIdx.x; k < f; k += blockDim.x)
temp += sharedp[k]*sharedap[k];
//temp = blockReduceSum(shared, temp);
blockReduceSumWithAtomics(rsnew, temp);
//sync needed, to let all atomicAdd threads complete
__syncthreads();
if(threadIdx.x == 0){
//pAp = temp;
//alpha=rsold/(p'*Ap); use rsnew to store pAp
alpha[0] = rsold[0]/rsnew[0];
#ifdef DEBUG
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***pAp:\n");
printf("pAp = %f \n", rsnew[0]);
printf("***alpha:\n");
printf("alpha = %f \n", alpha[0]);
#endif
rsnew[0] = 0;
}
//needed, alpha[0] to be used by all threads
__syncthreads();
for(int k = threadIdx.x; k < f; k += blockDim.x){
//x=x+alpha*p;
sharedx[k] =
sharedx[k] + alpha[0] * sharedp[k];
//r=r-alpha*Ap;
sharedr[k] =
sharedr[k] - alpha[0] * sharedap[k];
}
//NOT needed?
//__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content before 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
//rsnew=r'*r;
/*
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
temp = blockReduceSum(shared, temp);
__syncthreads();
if(threadIdx.x == 0){
rsnew[0] = temp;
}
*/
temp = 0;
for(int k = threadIdx.x; k < f; k += blockDim.x)
temp += sharedr[k]*sharedr[k];
blockReduceSumWithAtomics(rsnew, temp);
//WARN: has to have this sync!
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsnew:\n");
printf("rsnew = %f \n", rsnew[0]);
printf("***shared memory content after 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(rsnew[0]<CG_ERROR)
break;
//NOT needed?
//__syncthreads();
//beta
if(threadIdx.x == 0){
beta[0] = rsnew[0]/rsold[0];
//rsold=rsnew;
rsold[0] = rsnew[0];
}
//need sync since every thread needs beta[0]
__syncthreads();
for(int k = threadIdx.x; k < f; k += blockDim.x)
//p=r+(rsnew/rsold)*p;
sharedp[k] =
sharedr[k] + beta[0] * sharedp[k];
//need sync as every thread needs sharedp at the beginning of for
__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content after update p:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
__syncthreads();
#endif
}//end of CG iterations
for(int k = threadIdx.x; k < f; k += blockDim.x)
//x<--sharedx
x[blockIdx.x*f + k] = sharedx[k];
}
void updateXWithCGHost_tt_fp16(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
hipLaunchKernelGGL(( updateXWithCGKernel3), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), 0,
(half*)A, x, b, batchSize, f, cgIter);
hipDeviceSynchronize();
cudaCheckError();
#ifdef DEBUG
printf("***A[0]:\n");
float *h_A = new float[f * f];
float *A_fp32;
cudacall(hipMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0])));
hipLaunchKernelGGL(( fp16Array2fp32Array), dim3((f*f-1)/1024 + 1), dim3(1024), 0, 0, A_fp32, (half*)A, f*f);
hipDeviceSynchronize();
cudaCheckError();
cudacall(hipMemcpy(h_A, A_fp32, f * f * sizeof(float), hipMemcpyDeviceToHost));
for(int i = 0; i < f*f; i++)
printf("%f ", h_A[i]);
printf("\n");
delete [] h_A;
cudacall(hipFree(A_fp32));
printf("***x[0]:\n");
float *h_x = new float[f];
cudacall(hipMemcpy(h_x, x, f * sizeof(float), hipMemcpyDeviceToHost));
for(int i = 0; i < f; i++)
printf("%f ", h_x[i]);
printf("\n");
delete [] h_x;
/*
printf("***b[0]:\n");
float *h_b = new float[f];
cudacall(hipMemcpy(h_b, b, f * sizeof(float), hipMemcpyDeviceToHost));
for(int i = 0; i < f; i++)
printf("%f ", h_b[i]);
printf("\n");
delete [] h_b;
*/
#endif
}
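// Host wrapper for the fp32 kernel: one block per system in the batch,
// f threads per block, (4*f+4) floats of dynamic shared memory.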
void updateXWithCGHost(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
hipLaunchKernelGGL(( updateXWithCGKernel), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), 0,
//updateXWithCGKernel2, batchSize, 96, 4*f+4)*sizeof(float)>>>
(A, x, b, batchSize, f, cgIter);
hipDeviceSynchronize();
cudaCheckError();
#ifdef DEBUG
printf("***A[0]:\n");
float *h_A = new float[f * f];
float *A_fp32;
cudacall(hipMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0])));
hipLaunchKernelGGL(( fp16Array2fp32Array), dim3((f*f-1)/1024 + 1), dim3(1024), 0, 0, A_fp32, (half*)A, f*f);
hipDeviceSynchronize();
cudaCheckError();
cudacall(hipMemcpy(h_A, A_fp32, f * f * sizeof(float), hipMemcpyDeviceToHost));
for(int i = 0; i < f*f; i++)
printf("%f ", h_A[i]);
printf("\n");
delete [] h_A;
cudacall(hipFree(A_fp32));
printf("***x[0]:\n");
float *h_x = new float[f];
cudacall(hipMemcpy(h_x, x, f * sizeof(float), hipMemcpyDeviceToHost));
for(int i = 0; i < f; i++)
printf("%f ", h_x[i]);
printf("\n");
delete [] h_x;
/*
printf("***b[0]:\n");
float *h_b = new float[f];
cudacall(hipMemcpy(h_b, b, f * sizeof(float), hipMemcpyDeviceToHost));
for(int i = 0; i < f; i++)
printf("%f ", h_b[i]);
printf("\n");
delete [] h_b;
*/
#endif
}
//fused kernel, use thetaT to update XT
__global__ void
__launch_bounds__(64)
alsUpdateFeature100(const int batch_offset,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float* thetaT, float* XT, float* ythetaT, int cgIter) {
extern __shared__ float2 thetaTemp[];
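// thetaTemp (SCAN_BATCH * F/2 float2, see the host launch below) buffers the
// theta rows of one scan window; after the accumulation phase it is reused
// as CG scratch (x, p, r, Ap, Ax and the CG scalars).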
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
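// The mapping above assigns threads 0..54 to the 55 tiles (tile_x <= tile_y)
// of the upper triangle of the 10x10 grid of tile-by-tile blocks of the
// symmetric F x F matrix; each of these threads accumulates its tile in
// registers.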
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 theta;
//copy texture --> smem, and sync
//two layers: warp divergence unless we split at 32
//require 32 >= SCAN_BATCH
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]);
thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]);
thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
__syncthreads();
//tile: 10*10
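// accumulate_in_registers() is a macro from the included headers (not shown
// here); it presumably accumulates this thread's 10x10 tile of
// sum_k theta_k * theta_k' into temp0..temp99.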
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
}
#endif
//newly added CG phase
//reuse the abundant shared memory
float *sharedx = (float*)&thetaTemp[0];
float *sharedp = (float*)&thetaTemp[50];
float *sharedr = (float*)&thetaTemp[100];
float *sharedap = (float*)&thetaTemp[150];
float *sharedax = (float*)&thetaTemp[200];
float *rsold = (float*)&thetaTemp[250];
float *alpha = (float*)&thetaTemp[251];
float *rsnew = (float*)&thetaTemp[252];
float *beta = (float*)&thetaTemp[253];
//sharedx<--x
for(int k = threadIdx.x; k < F; k += 64){
sharedx[k] = XT[blockIdx.x*F + k];
sharedax[k] = 0;
}
__syncthreads();
float temp = 0;
//only uses 55 threads for A*p and A*x
if(threadIdx.x < 55){
//add regularization
if(tile_x==tile_y){
temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
}
#endif
//r=b-A*x;
//step1: ax=A*x
atomicAdd(&sharedax[tile_y], temp0*sharedx[tile_x] + temp10*sharedx[tile_x+1] + temp20*sharedx[tile_x+2] + temp30*sharedx[tile_x+3] +
temp40*sharedx[tile_x + 4] + temp50*sharedx[tile_x + 5] + temp60*sharedx[tile_x + 6] + temp70*sharedx[tile_x + 7] +
temp80*sharedx[tile_x + 8] + temp90*sharedx[tile_x + 9]);
atomicAdd(&sharedax[tile_y+1], temp1*sharedx[tile_x] + temp11*sharedx[tile_x+1] + temp21*sharedx[tile_x+2] + temp31*sharedx[tile_x+3] +
temp41*sharedx[tile_x+4] + temp51*sharedx[tile_x+5] + temp61*sharedx[tile_x+6] +
temp71*sharedx[tile_x+7] + temp81*sharedx[tile_x+8] + temp91*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+2], temp2*sharedx[tile_x] + temp12*sharedx[tile_x+1] + temp22*sharedx[tile_x+2] + temp32*sharedx[tile_x+3] +
temp42*sharedx[tile_x+4] + temp52*sharedx[tile_x+5] + temp62*sharedx[tile_x+6] +
temp72*sharedx[tile_x+7] + temp82*sharedx[tile_x+8] + temp92*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+3], temp3*sharedx[tile_x] + temp13*sharedx[tile_x+1] + temp23*sharedx[tile_x+2] + temp33*sharedx[tile_x+3] +
temp43*sharedx[tile_x+4] + temp53*sharedx[tile_x+5] + temp63*sharedx[tile_x+6] +
temp73*sharedx[tile_x+7] + temp83*sharedx[tile_x+8] + temp93*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+4], temp4*sharedx[tile_x] + temp14*sharedx[tile_x+1] + temp24*sharedx[tile_x+2] + temp34*sharedx[tile_x+3] +
temp44*sharedx[tile_x+4] + temp54*sharedx[tile_x+5] + temp64*sharedx[tile_x+6] +
temp74*sharedx[tile_x+7] + temp84*sharedx[tile_x+8] + temp94*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+5], temp5*sharedx[tile_x] + temp15*sharedx[tile_x+1] + temp25*sharedx[tile_x+2] + temp35*sharedx[tile_x+3] +
temp45*sharedx[tile_x+4] + temp55*sharedx[tile_x+5] + temp65*sharedx[tile_x+6] +
temp75*sharedx[tile_x+7] + temp85*sharedx[tile_x+8] + temp95*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+6], temp6*sharedx[tile_x] + temp16*sharedx[tile_x+1] + temp26*sharedx[tile_x+2] + temp36*sharedx[tile_x+3] +
temp46*sharedx[tile_x+4] + temp56*sharedx[tile_x+5] + temp66*sharedx[tile_x+6] +
temp76*sharedx[tile_x+7] + temp86*sharedx[tile_x+8] + temp96*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+7], temp7*sharedx[tile_x] + temp17*sharedx[tile_x+1] + temp27*sharedx[tile_x+2] + temp37*sharedx[tile_x+3] +
temp47*sharedx[tile_x+4] + temp57*sharedx[tile_x+5] + temp67*sharedx[tile_x+6] +
temp77*sharedx[tile_x+7] + temp87*sharedx[tile_x+8] + temp97*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+8], temp8*sharedx[tile_x] + temp18*sharedx[tile_x+1] + temp28*sharedx[tile_x+2] + temp38*sharedx[tile_x+3] +
temp48*sharedx[tile_x+4] + temp58*sharedx[tile_x+5] + temp68*sharedx[tile_x+6] +
temp78*sharedx[tile_x+7] + temp88*sharedx[tile_x+8] + temp98*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+9], temp9*sharedx[tile_x] + temp19*sharedx[tile_x+1] + temp29*sharedx[tile_x+2] + temp39*sharedx[tile_x+3] +
temp49*sharedx[tile_x+4] + temp59*sharedx[tile_x+5] + temp69*sharedx[tile_x+6] +
temp79*sharedx[tile_x+7] + temp89*sharedx[tile_x+8] + temp99*sharedx[tile_x+9]);
if(tile_x!=tile_y){
atomicAdd(&sharedax[tile_x], temp0*sharedx[tile_y] + temp1*sharedx[tile_y + 1] + temp2*sharedx[tile_y + 2] + temp3*sharedx[tile_y + 3] +
temp4*sharedx[tile_y + 4] + temp5*sharedx[tile_y + 5] + temp6*sharedx[tile_y + 6] + temp7*sharedx[tile_y + 7] +
temp8*sharedx[tile_y + 8] + temp9*sharedx[tile_y + 9]);
atomicAdd(&sharedax[tile_x+1], temp10*sharedx[tile_y] + temp11*sharedx[tile_y+1] + temp12*sharedx[tile_y+2] + temp13*sharedx[tile_y+3] +
temp14*sharedx[tile_y+4] + temp15*sharedx[tile_y+5] + temp16*sharedx[tile_y+6] +
temp17*sharedx[tile_y+7] + temp18*sharedx[tile_y+8] + temp19*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+2], temp20*sharedx[tile_y] + temp21*sharedx[tile_y+1] + temp22*sharedx[tile_y+2] + temp23*sharedx[tile_y+3] +
temp24*sharedx[tile_y+4] + temp25*sharedx[tile_y+5] + temp26*sharedx[tile_y+6] +
temp27*sharedx[tile_y+7] + temp28*sharedx[tile_y+8] + temp29*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+3], temp30*sharedx[tile_y] + temp31*sharedx[tile_y+1] + temp32*sharedx[tile_y+2] + temp33*sharedx[tile_y+3] +
temp34*sharedx[tile_y+4] + temp35*sharedx[tile_y+5] + temp36*sharedx[tile_y+6] +
temp37*sharedx[tile_y+7] + temp38*sharedx[tile_y+8] + temp39*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+4], temp40*sharedx[tile_y] + temp41*sharedx[tile_y+1] + temp42*sharedx[tile_y+2] + temp43*sharedx[tile_y+3] +
temp44*sharedx[tile_y+4] + temp45*sharedx[tile_y+5] + temp46*sharedx[tile_y+6] +
temp47*sharedx[tile_y+7] + temp48*sharedx[tile_y+8] + temp49*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+5], temp50*sharedx[tile_y] + temp51*sharedx[tile_y+1] + temp52*sharedx[tile_y+2] + temp53*sharedx[tile_y+3] +
temp54*sharedx[tile_y+4] + temp55*sharedx[tile_y+5] + temp56*sharedx[tile_y+6] +
temp57*sharedx[tile_y+7] + temp58*sharedx[tile_y+8] + temp59*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+6], temp60*sharedx[tile_y] + temp61*sharedx[tile_y+1] + temp62*sharedx[tile_y+2] + temp63*sharedx[tile_y+3] +
temp64*sharedx[tile_y+4] + temp65*sharedx[tile_y+5] + temp66*sharedx[tile_y+6] +
temp67*sharedx[tile_y+7] + temp68*sharedx[tile_y+8] + temp69*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+7], temp70*sharedx[tile_y] + temp71*sharedx[tile_y+1] + temp72*sharedx[tile_y+2] + temp73*sharedx[tile_y+3] +
temp74*sharedx[tile_y+4] + temp75*sharedx[tile_y+5] + temp76*sharedx[tile_y+6] +
temp77*sharedx[tile_y+7] + temp78*sharedx[tile_y+8] + temp79*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+8], temp80*sharedx[tile_y] + temp81*sharedx[tile_y+1] + temp82*sharedx[tile_y+2] + temp83*sharedx[tile_y+3] +
temp84*sharedx[tile_y+4] + temp85*sharedx[tile_y+5] + temp86*sharedx[tile_y+6] +
temp87*sharedx[tile_y+7] + temp88*sharedx[tile_y+8] + temp89*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+9], temp90*sharedx[tile_y] + temp91*sharedx[tile_y+1] + temp92*sharedx[tile_y+2] + temp93*sharedx[tile_y+3] +
temp94*sharedx[tile_y+4] + temp95*sharedx[tile_y+5] + temp96*sharedx[tile_y+6] +
temp97*sharedx[tile_y+7] + temp98*sharedx[tile_y+8] + temp99*sharedx[tile_y+9]);
}
}
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***x:\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedx[i]);
printf("\n\n");
printf("***r=Ax:\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedax[i]);
printf("\n\n");
}
#endif
for(int k = threadIdx.x; k < F; k += 64){
//r=b-Ax
sharedr[k] = ythetaT[blockIdx.x*blockDim.x + k] - sharedax[k];
//p=r;
sharedp[k] = sharedr[k];
}
//rsold=r'*r;
if(threadIdx.x == 0){
rsold[0] = 0;
}
for(int k = threadIdx.x; k < F; k += 64){
temp += sharedr[k]*sharedr[k];
}
blockReduceSumWithAtomics(rsold, temp);
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***shared memory content after 1st blockReduceSum:\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedx[i]);
printf("\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedax[i]);
printf("\n\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
///*
//CG iterations
for(int iter = 0; iter < cgIter; iter++){
//ap=A*p;
for(int k = threadIdx.x; k < F; k += 64)
sharedap[k] = 0;
__syncthreads();
//only uses 55 threads for A*p and A*x
if(threadIdx.x < 55){
atomicAdd(&sharedap[tile_y], temp0*sharedp[tile_x] + temp10*sharedp[tile_x+1] + temp20*sharedp[tile_x+2] + temp30*sharedp[tile_x+3] +
temp40*sharedp[tile_x + 4] + temp50*sharedp[tile_x + 5] + temp60*sharedp[tile_x + 6] + temp70*sharedp[tile_x + 7] +
temp80*sharedp[tile_x + 8] + temp90*sharedp[tile_x + 9]);
atomicAdd(&sharedap[tile_y+1], temp1*sharedp[tile_x] + temp11*sharedp[tile_x+1] + temp21*sharedp[tile_x+2] + temp31*sharedp[tile_x+3] +
temp41*sharedp[tile_x+4] + temp51*sharedp[tile_x+5] + temp61*sharedp[tile_x+6] +
temp71*sharedp[tile_x+7] + temp81*sharedp[tile_x+8] + temp91*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+2], temp2*sharedp[tile_x] + temp12*sharedp[tile_x+1] + temp22*sharedp[tile_x+2] + temp32*sharedp[tile_x+3] +
temp42*sharedp[tile_x+4] + temp52*sharedp[tile_x+5] + temp62*sharedp[tile_x+6] +
temp72*sharedp[tile_x+7] + temp82*sharedp[tile_x+8] + temp92*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+3], temp3*sharedp[tile_x] + temp13*sharedp[tile_x+1] + temp23*sharedp[tile_x+2] + temp33*sharedp[tile_x+3] +
temp43*sharedp[tile_x+4] + temp53*sharedp[tile_x+5] + temp63*sharedp[tile_x+6] +
temp73*sharedp[tile_x+7] + temp83*sharedp[tile_x+8] + temp93*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+4], temp4*sharedp[tile_x] + temp14*sharedp[tile_x+1] + temp24*sharedp[tile_x+2] + temp34*sharedp[tile_x+3] +
temp44*sharedp[tile_x+4] + temp54*sharedp[tile_x+5] + temp64*sharedp[tile_x+6] +
temp74*sharedp[tile_x+7] + temp84*sharedp[tile_x+8] + temp94*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+5], temp5*sharedp[tile_x] + temp15*sharedp[tile_x+1] + temp25*sharedp[tile_x+2] + temp35*sharedp[tile_x+3] +
temp45*sharedp[tile_x+4] + temp55*sharedp[tile_x+5] + temp65*sharedp[tile_x+6] +
temp75*sharedp[tile_x+7] + temp85*sharedp[tile_x+8] + temp95*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+6], temp6*sharedp[tile_x] + temp16*sharedp[tile_x+1] + temp26*sharedp[tile_x+2] + temp36*sharedp[tile_x+3] +
temp46*sharedp[tile_x+4] + temp56*sharedp[tile_x+5] + temp66*sharedp[tile_x+6] +
temp76*sharedp[tile_x+7] + temp86*sharedp[tile_x+8] + temp96*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+7], temp7*sharedp[tile_x] + temp17*sharedp[tile_x+1] + temp27*sharedp[tile_x+2] + temp37*sharedp[tile_x+3] +
temp47*sharedp[tile_x+4] + temp57*sharedp[tile_x+5] + temp67*sharedp[tile_x+6] +
temp77*sharedp[tile_x+7] + temp87*sharedp[tile_x+8] + temp97*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+8], temp8*sharedp[tile_x] + temp18*sharedp[tile_x+1] + temp28*sharedp[tile_x+2] + temp38*sharedp[tile_x+3] +
temp48*sharedp[tile_x+4] + temp58*sharedp[tile_x+5] + temp68*sharedp[tile_x+6] +
temp78*sharedp[tile_x+7] + temp88*sharedp[tile_x+8] + temp98*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+9], temp9*sharedp[tile_x] + temp19*sharedp[tile_x+1] + temp29*sharedp[tile_x+2] + temp39*sharedp[tile_x+3] +
temp49*sharedp[tile_x+4] + temp59*sharedp[tile_x+5] + temp69*sharedp[tile_x+6] +
temp79*sharedp[tile_x+7] + temp89*sharedp[tile_x+8] + temp99*sharedp[tile_x+9]);
if(tile_x!=tile_y){
atomicAdd(&sharedap[tile_x], temp0*sharedp[tile_y] + temp1*sharedp[tile_y + 1] + temp2*sharedp[tile_y + 2] + temp3*sharedp[tile_y + 3] +
temp4*sharedp[tile_y + 4] + temp5*sharedp[tile_y + 5] + temp6*sharedp[tile_y + 6] + temp7*sharedp[tile_y + 7] +
temp8*sharedp[tile_y + 8] + temp9*sharedp[tile_y + 9]);
atomicAdd(&sharedap[tile_x+1], temp10*sharedp[tile_y] + temp11*sharedp[tile_y+1] + temp12*sharedp[tile_y+2] + temp13*sharedp[tile_y+3] +
temp14*sharedp[tile_y+4] + temp15*sharedp[tile_y+5] + temp16*sharedp[tile_y+6] +
temp17*sharedp[tile_y+7] + temp18*sharedp[tile_y+8] + temp19*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+2], temp20*sharedp[tile_y] + temp21*sharedp[tile_y+1] + temp22*sharedp[tile_y+2] + temp23*sharedp[tile_y+3] +
temp24*sharedp[tile_y+4] + temp25*sharedp[tile_y+5] + temp26*sharedp[tile_y+6] +
temp27*sharedp[tile_y+7] + temp28*sharedp[tile_y+8] + temp29*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+3], temp30*sharedp[tile_y] + temp31*sharedp[tile_y+1] + temp32*sharedp[tile_y+2] + temp33*sharedp[tile_y+3] +
temp34*sharedp[tile_y+4] + temp35*sharedp[tile_y+5] + temp36*sharedp[tile_y+6] +
temp37*sharedp[tile_y+7] + temp38*sharedp[tile_y+8] + temp39*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+4], temp40*sharedp[tile_y] + temp41*sharedp[tile_y+1] + temp42*sharedp[tile_y+2] + temp43*sharedp[tile_y+3] +
temp44*sharedp[tile_y+4] + temp45*sharedp[tile_y+5] + temp46*sharedp[tile_y+6] +
temp47*sharedp[tile_y+7] + temp48*sharedp[tile_y+8] + temp49*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+5], temp50*sharedp[tile_y] + temp51*sharedp[tile_y+1] + temp52*sharedp[tile_y+2] + temp53*sharedp[tile_y+3] +
temp54*sharedp[tile_y+4] + temp55*sharedp[tile_y+5] + temp56*sharedp[tile_y+6] +
temp57*sharedp[tile_y+7] + temp58*sharedp[tile_y+8] + temp59*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+6], temp60*sharedp[tile_y] + temp61*sharedp[tile_y+1] + temp62*sharedp[tile_y+2] + temp63*sharedp[tile_y+3] +
temp64*sharedp[tile_y+4] + temp65*sharedp[tile_y+5] + temp66*sharedp[tile_y+6] +
temp67*sharedp[tile_y+7] + temp68*sharedp[tile_y+8] + temp69*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+7], temp70*sharedp[tile_y] + temp71*sharedp[tile_y+1] + temp72*sharedp[tile_y+2] + temp73*sharedp[tile_y+3] +
temp74*sharedp[tile_y+4] + temp75*sharedp[tile_y+5] + temp76*sharedp[tile_y+6] +
temp77*sharedp[tile_y+7] + temp78*sharedp[tile_y+8] + temp79*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+8], temp80*sharedp[tile_y] + temp81*sharedp[tile_y+1] + temp82*sharedp[tile_y+2] + temp83*sharedp[tile_y+3] +
temp84*sharedp[tile_y+4] + temp85*sharedp[tile_y+5] + temp86*sharedp[tile_y+6] +
temp87*sharedp[tile_y+7] + temp88*sharedp[tile_y+8] + temp89*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+9], temp90*sharedp[tile_y] + temp91*sharedp[tile_y+1] + temp92*sharedp[tile_y+2] + temp93*sharedp[tile_y+3] +
temp94*sharedp[tile_y+4] + temp95*sharedp[tile_y+5] + temp96*sharedp[tile_y+6] +
temp97*sharedp[tile_y+7] + temp98*sharedp[tile_y+8] + temp99*sharedp[tile_y+9]);
}
}
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("----------CG iteration %d \n", iter);
printf("***ap:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n\n");
printf("***shared memory content before 2rd blockReduceSum:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedp[i]);
printf("\n\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedr[i]);
printf("\n\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n\n");
}
#endif
if(threadIdx.x == 0){
rsnew[0] = 0;
}
//no need to have sync before blockReduce
//because there is a __syncthreads() in blockReduce
//pAp=p'*Ap
temp = 0;
for(int k = threadIdx.x; k < F; k += 64)
temp += sharedp[k]*sharedap[k];
//temp = blockReduceSum(shared, temp);
blockReduceSumWithAtomics(rsnew, temp);
//sync needed, to let all atomicAdd threads complete
__syncthreads();
if(threadIdx.x == 0){
//pAp = temp;
//alpha=rsold/(p'*Ap); use rsnew to store pAp
alpha[0] = rsold[0]/rsnew[0];
#ifdef DEBUG
if(blockIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***pAp:\n");
printf("pAp = %f \n", rsnew[0]);
printf("***alpha:\n");
printf("alpha = %f \n", alpha[0]);
}
#endif
rsnew[0] = 0;
}
//needed, alpha[0] to be used by all threads
__syncthreads();
for(int k = threadIdx.x; k < F; k += 64){
//x=x+alpha*p;
sharedx[k] = sharedx[k] + alpha[0] * sharedp[k];
//r=r-alpha*Ap;
sharedr[k] = sharedr[k] - alpha[0] * sharedap[k];
//NOT needed?
//__syncthreads();
}
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***shared memory content before 3rd blockReduceSum:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
//rsnew=r'*r;
temp = 0;
for(int k = threadIdx.x; k < F; k += 64)
temp += sharedr[k]*sharedr[k];
blockReduceSumWithAtomics(rsnew, temp);
//WARN: has to have this sync!
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***rsnew:\n");
printf("rsnew = %f \n", rsnew[0]);
printf("***shared memory content after 3rd blockReduceSum:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(rsnew[0]<CG_ERROR)
break;
//NOT needed?
//__syncthreads();
//beta
if(threadIdx.x == 0){
beta[0] = rsnew[0]/rsold[0];
//rsold=rsnew;
rsold[0] = rsnew[0];
}
//need sync since every thread needs beta[0]
__syncthreads();
//p=r+(rsnew/rsold)*p;
for(int k = threadIdx.x; k < F; k += 64)
sharedp[k] = sharedr[k] + beta[0] * sharedp[k];
//need sync as every thread needs sharedp at the beginning of for
__syncthreads();
#ifdef DEBUG
__syncthreads();
if(blockIdx.x==0 && threadIdx.x==0){
printf("***shared memory content after update p:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
__syncthreads();
#endif
}//end of CG iterations
//x<--sharedx
for(int k = threadIdx.x; k < F; k += 64)
XT[blockIdx.x*F + k] = sharedx[k];
//*/
}
}
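// Host wrapper: one block per row (m blocks), 64 threads per block, and
// SCAN_BATCH * F/2 float2 of dynamic shared memory for thetaTemp.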
void alsUpdateFeature100Host(const int batch_offset,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float* __restrict__ thetaT, float* XT, float* ythetaT, int cgIter){
hipLaunchKernelGGL(( alsUpdateFeature100), dim3(m), dim3(64), SCAN_BATCH * F/2*sizeof(float2), 0,
batch_offset, csrRowIndex, csrColIndex, lambda, m, F, thetaT, XT, ythetaT, cgIter);
hipDeviceSynchronize();
cudaCheckError();
} | 94028c5407e3fe7c97b8c7a2cf65f5339b0549c7.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* cg.cu
* Created on: July 22, 2016
* Author: Wei Tan ([email protected])
* CUDA kernels related to batch CG solver used in ALS
* CG solver: https://en.wikipedia.org/wiki/Conjugate_gradient_method
* Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2
*/
#include "als.h"
#include "device_utilities.h"
#include "host_utilities.h"
#include <fstream>
#define SCAN_BATCH 24
#define CG_ERROR 1e-4
#undef DEBUG
//CG (iterative solve) kernel
//each block solves a A*x=b
__global__ void updateXWithCGKernel(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
extern __shared__ float smem[];
float *sharedx = &smem[0];
float *sharedp = &smem[f];
float *sharedr = &smem[2*f];
float *sharedap = &smem[3*f];
float *rsold = &smem[4*f];
float *alpha = &smem[4*f+1];
float *rsnew = &smem[4*f+2];
float *beta = &smem[4*f+3];
//sharedx<--x
sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x];
__syncthreads();
//r=b-A*x;
float temp = 0;
for(int i = 0; i < f; i++)
//this is math correct and coalesced because A is symmetric
temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedx[i];
sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp;
//p=r;
sharedp[threadIdx.x] = sharedr[threadIdx.x];
//rsold=r'*r;
if(threadIdx.x == 0){
rsold[0] = 0;
}
temp = sharedr[threadIdx.x]
*sharedr[threadIdx.x];
blockReduceSumWithAtomics(rsold, temp);
//temp = blockReduceSum(shared, temp);
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***shared memory content after 1st blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
for(int iter = 0; iter < cgIter; iter++){
//ap=A*p;
//WARN: set temp to zero since the next operation is +=!
temp = 0;
for(int i = 0; i < f; i++)
//this is math correct and coalesced because A is symmetric
temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedp[i];
sharedap[threadIdx.x] = temp;
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("----------CG iteration %d \n", iter);
printf("***ap:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
printf("***shared memory content before 2rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(threadIdx.x == 0){
rsnew[0] = 0;
}
//no need to have sync before blockReduce
//because there is a __syncthreads() in blockReduce
//pAp=p'*Ap
temp = sharedp[threadIdx.x]
*sharedap[threadIdx.x];
//temp = blockReduceSum(shared, temp);
blockReduceSumWithAtomics(rsnew, temp);
		//sync needed, to let all atomicAdd threads complete
__syncthreads();
if(threadIdx.x == 0){
//pAp = temp;
//alpha=rsold/(p'*Ap); use rsnew to store pAp
alpha[0] = rsold[0]/rsnew[0];
#ifdef DEBUG
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***pAp:\n");
printf("pAp = %f \n", rsnew[0]);
printf("***alpha:\n");
printf("alpha = %f \n", alpha[0]);
#endif
rsnew[0] = 0;
}
		//needed, alpha[0] to be used by all threads
__syncthreads();
//x=x+alpha*p;
sharedx[threadIdx.x] =
sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x];
//r=r-alpha*Ap;
sharedr[threadIdx.x] =
sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x];
//NOT needed?
//__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content before 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
//rsnew=r'*r;
/*
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
temp = blockReduceSum(shared, temp);
__syncthreads();
if(threadIdx.x == 0){
rsnew[0] = temp;
}
*/
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
blockReduceSumWithAtomics(rsnew, temp);
//WARN: has to have this sync!
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsnew:\n");
printf("rsnew = %f \n", rsnew[0]);
printf("***shared memory content after 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(rsnew[0]<CG_ERROR)
break;
//NOT needed?
//__syncthreads();
//beta
if(threadIdx.x == 0){
beta[0] = rsnew[0]/rsold[0];
//rsold=rsnew;
rsold[0] = rsnew[0];
}
//need sync since every thread needs beta[0]
__syncthreads();
//p=r+(rsnew/rsold)*p;
sharedp[threadIdx.x] =
sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x];
//need sync as every thread needs sharedp at the beginning of for
__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content after update p:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
__syncthreads();
#endif
}//end of CG iterations
//x<--sharedx
x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x];
}
//CG (iterative solve) kernel
//each block solves a A*x=b and A in fp16
__global__ void updateXWithCGKernel3(half * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
extern __shared__ float smem[];
float *sharedx = &smem[0];
float *sharedp = &smem[f];
float *sharedr = &smem[2*f];
float *sharedap = &smem[3*f];
float *rsold = &smem[4*f];
float *alpha = &smem[4*f+1];
float *rsnew = &smem[4*f+2];
float *beta = &smem[4*f+3];
//sharedx<--x
sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x];
__syncthreads();
//r=b-A*x;
float temp = 0;
for(int i = 0; i < f; i++)
//this is math correct and coalesced because A is symmetric
temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedx[i];
sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp;
//p=r;
sharedp[threadIdx.x] = sharedr[threadIdx.x];
//rsold=r'*r;
if(threadIdx.x == 0){
rsold[0] = 0;
}
temp = sharedr[threadIdx.x]
*sharedr[threadIdx.x];
blockReduceSumWithAtomics(rsold, temp);
//temp = blockReduceSum(shared, temp);
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***shared memory content after 1st blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
for(int iter = 0; iter < cgIter; iter++){
//ap=A*p;
//WARN: set temp to zero since the next operation is +=!
temp = 0;
for(int i = 0; i < f; i++)
//this is math correct and coalesced because A is symmetric
temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedp[i];
sharedap[threadIdx.x] = temp;
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("----------CG iteration %d \n", iter);
printf("***ap:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
printf("***shared memory content before 2rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(threadIdx.x == 0){
rsnew[0] = 0;
}
//no need to have sync before blockReduce
//because there is a __syncthreads() in blockReduce
//pAp=p'*Ap
temp = sharedp[threadIdx.x]
*sharedap[threadIdx.x];
//temp = blockReduceSum(shared, temp);
blockReduceSumWithAtomics(rsnew, temp);
		//sync needed, to let all atomicAdd threads complete
__syncthreads();
if(threadIdx.x == 0){
//pAp = temp;
//alpha=rsold/(p'*Ap); use rsnew to store pAp
alpha[0] = rsold[0]/rsnew[0];
#ifdef DEBUG
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***pAp:\n");
printf("pAp = %f \n", rsnew[0]);
printf("***alpha:\n");
printf("alpha = %f \n", alpha[0]);
#endif
rsnew[0] = 0;
}
		//needed, alpha[0] to be used by all threads
__syncthreads();
//x=x+alpha*p;
sharedx[threadIdx.x] =
sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x];
//r=r-alpha*Ap;
sharedr[threadIdx.x] =
sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x];
//NOT needed?
//__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content before 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
//rsnew=r'*r;
/*
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
temp = blockReduceSum(shared, temp);
__syncthreads();
if(threadIdx.x == 0){
rsnew[0] = temp;
}
*/
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
blockReduceSumWithAtomics(rsnew, temp);
//WARN: has to have this sync!
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsnew:\n");
printf("rsnew = %f \n", rsnew[0]);
printf("***shared memory content after 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(rsnew[0]<CG_ERROR)
break;
//NOT needed?
//__syncthreads();
//beta
if(threadIdx.x == 0){
beta[0] = rsnew[0]/rsold[0];
//rsold=rsnew;
rsold[0] = rsnew[0];
}
//need sync since every thread needs beta[0]
__syncthreads();
//p=r+(rsnew/rsold)*p;
sharedp[threadIdx.x] =
sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x];
//need sync as every thread needs sharedp at the beginning of for
__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content after update p:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
__syncthreads();
#endif
}//end of CG iterations
//x<--sharedx
x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x];
}
//blockDim.x=64 or 96 (two or three warps) instead of 100 -- the warp shuffle seems to require this
__global__ void updateXWithCGKernel2(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
extern __shared__ float smem[];
float *sharedx = &smem[0];
float *sharedp = &smem[f];
float *sharedr = &smem[2*f];
float *sharedap = &smem[3*f];
float *rsold = &smem[4*f];
float *alpha = &smem[4*f+1];
float *rsnew = &smem[4*f+2];
float *beta = &smem[4*f+3];
//sharedx<--x
for(int k = threadIdx.x; k < f; k += blockDim.x)
sharedx[k] = x[blockIdx.x*f + k];
__syncthreads();
//r=b-A*x;
float temp = 0;
for(int k = threadIdx.x; k < f; k += blockDim.x){
temp = 0;
for(int i = 0; i < f; i++)
temp += A[blockIdx.x*f*f + f*i + k]*sharedx[i];
sharedr[k] = b[blockIdx.x*f + k] - temp;
//p=r;
sharedp[k] = sharedr[k];
}
//rsold=r'*r;
if(threadIdx.x == 0){
rsold[0] = 0;
}
temp = 0;
for(int k = threadIdx.x; k < f; k += blockDim.x){
temp += sharedr[k]*sharedr[k];
}
blockReduceSumWithAtomics(rsold, temp);
//temp = blockReduceSum(shared, temp);
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***shared memory content after 1st blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
for(int iter = 0; iter < cgIter; iter++){
//ap=A*p;
//WARN: set temp to zero since the next operation is +=!
for(int k = threadIdx.x; k < f; k += blockDim.x){
temp = 0;
for(int i = 0; i < f; i++)
temp += A[blockIdx.x*f*f + f*i + k]*sharedp[i];
sharedap[k] = temp;
}
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("----------CG iteration %d \n", iter);
printf("***ap:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
printf("***shared memory content before 2rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(threadIdx.x == 0){
rsnew[0] = 0;
}
//no need to have sync before blockReduce
//because there is a __syncthreads() in blockReduce
//pAp=p'*Ap
temp = 0;
for(int k = threadIdx.x; k < f; k += blockDim.x)
temp += sharedp[k]*sharedap[k];
//temp = blockReduceSum(shared, temp);
blockReduceSumWithAtomics(rsnew, temp);
		//sync needed, to let all atomicAdd threads complete
__syncthreads();
if(threadIdx.x == 0){
//pAp = temp;
//alpha=rsold/(p'*Ap); use rsnew to store pAp
alpha[0] = rsold[0]/rsnew[0];
#ifdef DEBUG
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***pAp:\n");
printf("pAp = %f \n", rsnew[0]);
printf("***alpha:\n");
printf("alpha = %f \n", alpha[0]);
#endif
rsnew[0] = 0;
}
		//needed, alpha[0] to be used by all threads
__syncthreads();
for(int k = threadIdx.x; k < f; k += blockDim.x){
//x=x+alpha*p;
sharedx[k] =
sharedx[k] + alpha[0] * sharedp[k];
//r=r-alpha*Ap;
sharedr[k] =
sharedr[k] - alpha[0] * sharedap[k];
}
//NOT needed?
//__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content before 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
//rsnew=r'*r;
/*
temp = sharedr[threadIdx.x]*sharedr[threadIdx.x];
temp = blockReduceSum(shared, temp);
__syncthreads();
if(threadIdx.x == 0){
rsnew[0] = temp;
}
*/
temp = 0;
for(int k = threadIdx.x; k < f; k += blockDim.x)
temp += sharedr[k]*sharedr[k];
blockReduceSumWithAtomics(rsnew, temp);
//WARN: has to have this sync!
__syncthreads();
#ifdef DEBUG
if(threadIdx.x==0){
printf("***rsnew:\n");
printf("rsnew = %f \n", rsnew[0]);
printf("***shared memory content after 3rd blockReduceSum:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(rsnew[0]<CG_ERROR)
break;
//NOT needed?
//__syncthreads();
//beta
if(threadIdx.x == 0){
beta[0] = rsnew[0]/rsold[0];
//rsold=rsnew;
rsold[0] = rsnew[0];
}
//need sync since every thread needs beta[0]
__syncthreads();
for(int k = threadIdx.x; k < f; k += blockDim.x)
//p=r+(rsnew/rsold)*p;
sharedp[k] =
sharedr[k] + beta[0] * sharedp[k];
//need sync as every thread needs sharedp at the beginning of for
__syncthreads();
#ifdef DEBUG
__syncthreads();
if(threadIdx.x==0){
printf("***shared memory content after update p:\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < f; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
__syncthreads();
#endif
}//end of CG iterations
for(int k = threadIdx.x; k < f; k += blockDim.x)
//x<--sharedx
x[blockIdx.x*f + k] = sharedx[k];
}
void updateXWithCGHost_tt_fp16(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
updateXWithCGKernel3<<<batchSize, f, (4*f+4)*sizeof(float)>>>
((half*)A, x, b, batchSize, f, cgIter);
cudaDeviceSynchronize();
cudaCheckError();
#ifdef DEBUG
printf("***A[0]:\n");
float *h_A = new float[f * f];
float *A_fp32;
cudacall(cudaMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0])));
fp16Array2fp32Array<<<(f*f-1)/1024 + 1, 1024>>>(A_fp32, (half*)A, f*f);
cudaDeviceSynchronize();
cudaCheckError();
cudacall(cudaMemcpy(h_A, A_fp32, f * f * sizeof(float), cudaMemcpyDeviceToHost));
for(int i = 0; i < f*f; i++)
printf("%f ", h_A[i]);
printf("\n");
delete [] h_A;
cudacall(cudaFree(A_fp32));
printf("***x[0]:\n");
float *h_x = new float[f];
cudacall(cudaMemcpy(h_x, x, f * sizeof(float), cudaMemcpyDeviceToHost));
for(int i = 0; i < f; i++)
printf("%f ", h_x[i]);
printf("\n");
delete [] h_x;
/*
printf("***b[0]:\n");
float *h_b = new float[f];
cudacall(cudaMemcpy(h_b, b, f * sizeof(float), cudaMemcpyDeviceToHost));
for(int i = 0; i < f; i++)
printf("%f ", h_b[i]);
printf("\n");
delete [] h_b;
*/
#endif
}
void updateXWithCGHost(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){
updateXWithCGKernel<<<batchSize, f, (4*f+4)*sizeof(float)>>>
//updateXWithCGKernel2<<<batchSize, 96, (4*f+4)*sizeof(float)>>>
(A, x, b, batchSize, f, cgIter);
cudaDeviceSynchronize();
cudaCheckError();
#ifdef DEBUG
printf("***A[0]:\n");
float *h_A = new float[f * f];
float *A_fp32;
cudacall(cudaMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0])));
fp16Array2fp32Array<<<(f*f-1)/1024 + 1, 1024>>>(A_fp32, (half*)A, f*f);
cudaDeviceSynchronize();
cudaCheckError();
cudacall(cudaMemcpy(h_A, A_fp32, f * f * sizeof(float), cudaMemcpyDeviceToHost));
for(int i = 0; i < f*f; i++)
printf("%f ", h_A[i]);
printf("\n");
delete [] h_A;
cudacall(cudaFree(A_fp32));
printf("***x[0]:\n");
float *h_x = new float[f];
cudacall(cudaMemcpy(h_x, x, f * sizeof(float), cudaMemcpyDeviceToHost));
for(int i = 0; i < f; i++)
printf("%f ", h_x[i]);
printf("\n");
delete [] h_x;
/*
printf("***b[0]:\n");
float *h_b = new float[f];
cudacall(cudaMemcpy(h_b, b, f * sizeof(float), cudaMemcpyDeviceToHost));
for(int i = 0; i < f; i++)
printf("%f ", h_b[i]);
printf("\n");
delete [] h_b;
*/
#endif
}
//fused kernel, use thetaT to update XT
__global__ void
__launch_bounds__(64)
alsUpdateFeature100(const int batch_offset,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float* thetaT, float* XT, float* ythetaT, int cgIter) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
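		//map threadIdx.x (0..54) to one 10x10 tile (tile_x, tile_y) of the symmetric
		//F x F matrix: 10*11/2 = 55 tiles cover one triangle plus the diagonal, and
		//each of those 55 threads accumulates its tile in registers temp0..temp99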
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 theta;
//copy texture --> smem, and sync
//two layers: warp divergence unless we split at 32
//require 32 >= SCAN_BATCH
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]);
thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]);
thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
__syncthreads();
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
}
#endif
//newly added CG phase
//reuse the abundant shared memory
float *sharedx = (float*)&thetaTemp[0];
float *sharedp = (float*)&thetaTemp[50];
float *sharedr = (float*)&thetaTemp[100];
float *sharedap = (float*)&thetaTemp[150];
float *sharedax = (float*)&thetaTemp[200];
float *rsold = (float*)&thetaTemp[250];
float *alpha = (float*)&thetaTemp[251];
float *rsnew = (float*)&thetaTemp[252];
float *beta = (float*)&thetaTemp[253];
//sharedx<--x
for(int k = threadIdx.x; k < F; k += 64){
sharedx[k] = XT[blockIdx.x*F + k];
sharedax[k] = 0;
}
__syncthreads();
float temp = 0;
//only uses 55 threads for A*p and A*x
if(threadIdx.x < 55){
//add regularization
if(tile_x==tile_y){
temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
}
#endif
//r=b-A*x;
//step1: ax=A*x
atomicAdd(&sharedax[tile_y], temp0*sharedx[tile_x] + temp10*sharedx[tile_x+1] + temp20*sharedx[tile_x+2] + temp30*sharedx[tile_x+3] +
temp40*sharedx[tile_x + 4] + temp50*sharedx[tile_x + 5] + temp60*sharedx[tile_x + 6] + temp70*sharedx[tile_x + 7] +
temp80*sharedx[tile_x + 8] + temp90*sharedx[tile_x + 9]);
atomicAdd(&sharedax[tile_y+1], temp1*sharedx[tile_x] + temp11*sharedx[tile_x+1] + temp21*sharedx[tile_x+2] + temp31*sharedx[tile_x+3] +
temp41*sharedx[tile_x+4] + temp51*sharedx[tile_x+5] + temp61*sharedx[tile_x+6] +
temp71*sharedx[tile_x+7] + temp81*sharedx[tile_x+8] + temp91*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+2], temp2*sharedx[tile_x] + temp12*sharedx[tile_x+1] + temp22*sharedx[tile_x+2] + temp32*sharedx[tile_x+3] +
temp42*sharedx[tile_x+4] + temp52*sharedx[tile_x+5] + temp62*sharedx[tile_x+6] +
temp72*sharedx[tile_x+7] + temp82*sharedx[tile_x+8] + temp92*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+3], temp3*sharedx[tile_x] + temp13*sharedx[tile_x+1] + temp23*sharedx[tile_x+2] + temp33*sharedx[tile_x+3] +
temp43*sharedx[tile_x+4] + temp53*sharedx[tile_x+5] + temp63*sharedx[tile_x+6] +
temp73*sharedx[tile_x+7] + temp83*sharedx[tile_x+8] + temp93*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+4], temp4*sharedx[tile_x] + temp14*sharedx[tile_x+1] + temp24*sharedx[tile_x+2] + temp34*sharedx[tile_x+3] +
temp44*sharedx[tile_x+4] + temp54*sharedx[tile_x+5] + temp64*sharedx[tile_x+6] +
temp74*sharedx[tile_x+7] + temp84*sharedx[tile_x+8] + temp94*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+5], temp5*sharedx[tile_x] + temp15*sharedx[tile_x+1] + temp25*sharedx[tile_x+2] + temp35*sharedx[tile_x+3] +
temp45*sharedx[tile_x+4] + temp55*sharedx[tile_x+5] + temp65*sharedx[tile_x+6] +
temp75*sharedx[tile_x+7] + temp85*sharedx[tile_x+8] + temp95*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+6], temp6*sharedx[tile_x] + temp16*sharedx[tile_x+1] + temp26*sharedx[tile_x+2] + temp36*sharedx[tile_x+3] +
temp46*sharedx[tile_x+4] + temp56*sharedx[tile_x+5] + temp66*sharedx[tile_x+6] +
temp76*sharedx[tile_x+7] + temp86*sharedx[tile_x+8] + temp96*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+7], temp7*sharedx[tile_x] + temp17*sharedx[tile_x+1] + temp27*sharedx[tile_x+2] + temp37*sharedx[tile_x+3] +
temp47*sharedx[tile_x+4] + temp57*sharedx[tile_x+5] + temp67*sharedx[tile_x+6] +
temp77*sharedx[tile_x+7] + temp87*sharedx[tile_x+8] + temp97*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+8], temp8*sharedx[tile_x] + temp18*sharedx[tile_x+1] + temp28*sharedx[tile_x+2] + temp38*sharedx[tile_x+3] +
temp48*sharedx[tile_x+4] + temp58*sharedx[tile_x+5] + temp68*sharedx[tile_x+6] +
temp78*sharedx[tile_x+7] + temp88*sharedx[tile_x+8] + temp98*sharedx[tile_x+9]);
atomicAdd(&sharedax[tile_y+9], temp9*sharedx[tile_x] + temp19*sharedx[tile_x+1] + temp29*sharedx[tile_x+2] + temp39*sharedx[tile_x+3] +
temp49*sharedx[tile_x+4] + temp59*sharedx[tile_x+5] + temp69*sharedx[tile_x+6] +
temp79*sharedx[tile_x+7] + temp89*sharedx[tile_x+8] + temp99*sharedx[tile_x+9]);
if(tile_x!=tile_y){
atomicAdd(&sharedax[tile_x], temp0*sharedx[tile_y] + temp1*sharedx[tile_y + 1] + temp2*sharedx[tile_y + 2] + temp3*sharedx[tile_y + 3] +
temp4*sharedx[tile_y + 4] + temp5*sharedx[tile_y + 5] + temp6*sharedx[tile_y + 6] + temp7*sharedx[tile_y + 7] +
temp8*sharedx[tile_y + 8] + temp9*sharedx[tile_y + 9]);
atomicAdd(&sharedax[tile_x+1], temp10*sharedx[tile_y] + temp11*sharedx[tile_y+1] + temp12*sharedx[tile_y+2] + temp13*sharedx[tile_y+3] +
temp14*sharedx[tile_y+4] + temp15*sharedx[tile_y+5] + temp16*sharedx[tile_y+6] +
temp17*sharedx[tile_y+7] + temp18*sharedx[tile_y+8] + temp19*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+2], temp20*sharedx[tile_y] + temp21*sharedx[tile_y+1] + temp22*sharedx[tile_y+2] + temp23*sharedx[tile_y+3] +
temp24*sharedx[tile_y+4] + temp25*sharedx[tile_y+5] + temp26*sharedx[tile_y+6] +
temp27*sharedx[tile_y+7] + temp28*sharedx[tile_y+8] + temp29*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+3], temp30*sharedx[tile_y] + temp31*sharedx[tile_y+1] + temp32*sharedx[tile_y+2] + temp33*sharedx[tile_y+3] +
temp34*sharedx[tile_y+4] + temp35*sharedx[tile_y+5] + temp36*sharedx[tile_y+6] +
temp37*sharedx[tile_y+7] + temp38*sharedx[tile_y+8] + temp39*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+4], temp40*sharedx[tile_y] + temp41*sharedx[tile_y+1] + temp42*sharedx[tile_y+2] + temp43*sharedx[tile_y+3] +
temp44*sharedx[tile_y+4] + temp45*sharedx[tile_y+5] + temp46*sharedx[tile_y+6] +
temp47*sharedx[tile_y+7] + temp48*sharedx[tile_y+8] + temp49*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+5], temp50*sharedx[tile_y] + temp51*sharedx[tile_y+1] + temp52*sharedx[tile_y+2] + temp53*sharedx[tile_y+3] +
temp54*sharedx[tile_y+4] + temp55*sharedx[tile_y+5] + temp56*sharedx[tile_y+6] +
temp57*sharedx[tile_y+7] + temp58*sharedx[tile_y+8] + temp59*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+6], temp60*sharedx[tile_y] + temp61*sharedx[tile_y+1] + temp62*sharedx[tile_y+2] + temp63*sharedx[tile_y+3] +
temp64*sharedx[tile_y+4] + temp65*sharedx[tile_y+5] + temp66*sharedx[tile_y+6] +
temp67*sharedx[tile_y+7] + temp68*sharedx[tile_y+8] + temp69*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+7], temp70*sharedx[tile_y] + temp71*sharedx[tile_y+1] + temp72*sharedx[tile_y+2] + temp73*sharedx[tile_y+3] +
temp74*sharedx[tile_y+4] + temp75*sharedx[tile_y+5] + temp76*sharedx[tile_y+6] +
temp77*sharedx[tile_y+7] + temp78*sharedx[tile_y+8] + temp79*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+8], temp80*sharedx[tile_y] + temp81*sharedx[tile_y+1] + temp82*sharedx[tile_y+2] + temp83*sharedx[tile_y+3] +
temp84*sharedx[tile_y+4] + temp85*sharedx[tile_y+5] + temp86*sharedx[tile_y+6] +
temp87*sharedx[tile_y+7] + temp88*sharedx[tile_y+8] + temp89*sharedx[tile_y+9]);
atomicAdd(&sharedax[tile_x+9], temp90*sharedx[tile_y] + temp91*sharedx[tile_y+1] + temp92*sharedx[tile_y+2] + temp93*sharedx[tile_y+3] +
temp94*sharedx[tile_y+4] + temp95*sharedx[tile_y+5] + temp96*sharedx[tile_y+6] +
temp97*sharedx[tile_y+7] + temp98*sharedx[tile_y+8] + temp99*sharedx[tile_y+9]);
}
}
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***x:\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedx[i]);
printf("\n\n");
printf("***r=Ax:\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedax[i]);
printf("\n\n");
}
#endif
for(int k = threadIdx.x; k < F; k += 64){
//r=b-Ax
sharedr[k] = ythetaT[blockIdx.x*blockDim.x + k] - sharedax[k];
//p=r;
sharedp[k] = sharedr[k];
}
//rsold=r'*r;
if(threadIdx.x == 0){
rsold[0] = 0;
}
for(int k = threadIdx.x; k < F; k += 64){
temp += sharedr[k]*sharedr[k];
}
blockReduceSumWithAtomics(rsold, temp);
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***shared memory content after 1st blockReduceSum:\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedx[i]);
printf("\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedax[i]);
printf("\n\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < 100; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
///*
//CG iterations
for(int iter = 0; iter < cgIter; iter++){
//ap=A*p;
for(int k = threadIdx.x; k < F; k += 64)
sharedap[k] = 0;
__syncthreads();
//only uses 55 threads for A*p and A*x
if(threadIdx.x < 55){
atomicAdd(&sharedap[tile_y], temp0*sharedp[tile_x] + temp10*sharedp[tile_x+1] + temp20*sharedp[tile_x+2] + temp30*sharedp[tile_x+3] +
temp40*sharedp[tile_x + 4] + temp50*sharedp[tile_x + 5] + temp60*sharedp[tile_x + 6] + temp70*sharedp[tile_x + 7] +
temp80*sharedp[tile_x + 8] + temp90*sharedp[tile_x + 9]);
atomicAdd(&sharedap[tile_y+1], temp1*sharedp[tile_x] + temp11*sharedp[tile_x+1] + temp21*sharedp[tile_x+2] + temp31*sharedp[tile_x+3] +
temp41*sharedp[tile_x+4] + temp51*sharedp[tile_x+5] + temp61*sharedp[tile_x+6] +
temp71*sharedp[tile_x+7] + temp81*sharedp[tile_x+8] + temp91*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+2], temp2*sharedp[tile_x] + temp12*sharedp[tile_x+1] + temp22*sharedp[tile_x+2] + temp32*sharedp[tile_x+3] +
temp42*sharedp[tile_x+4] + temp52*sharedp[tile_x+5] + temp62*sharedp[tile_x+6] +
temp72*sharedp[tile_x+7] + temp82*sharedp[tile_x+8] + temp92*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+3], temp3*sharedp[tile_x] + temp13*sharedp[tile_x+1] + temp23*sharedp[tile_x+2] + temp33*sharedp[tile_x+3] +
temp43*sharedp[tile_x+4] + temp53*sharedp[tile_x+5] + temp63*sharedp[tile_x+6] +
temp73*sharedp[tile_x+7] + temp83*sharedp[tile_x+8] + temp93*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+4], temp4*sharedp[tile_x] + temp14*sharedp[tile_x+1] + temp24*sharedp[tile_x+2] + temp34*sharedp[tile_x+3] +
temp44*sharedp[tile_x+4] + temp54*sharedp[tile_x+5] + temp64*sharedp[tile_x+6] +
temp74*sharedp[tile_x+7] + temp84*sharedp[tile_x+8] + temp94*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+5], temp5*sharedp[tile_x] + temp15*sharedp[tile_x+1] + temp25*sharedp[tile_x+2] + temp35*sharedp[tile_x+3] +
temp45*sharedp[tile_x+4] + temp55*sharedp[tile_x+5] + temp65*sharedp[tile_x+6] +
temp75*sharedp[tile_x+7] + temp85*sharedp[tile_x+8] + temp95*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+6], temp6*sharedp[tile_x] + temp16*sharedp[tile_x+1] + temp26*sharedp[tile_x+2] + temp36*sharedp[tile_x+3] +
temp46*sharedp[tile_x+4] + temp56*sharedp[tile_x+5] + temp66*sharedp[tile_x+6] +
temp76*sharedp[tile_x+7] + temp86*sharedp[tile_x+8] + temp96*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+7], temp7*sharedp[tile_x] + temp17*sharedp[tile_x+1] + temp27*sharedp[tile_x+2] + temp37*sharedp[tile_x+3] +
temp47*sharedp[tile_x+4] + temp57*sharedp[tile_x+5] + temp67*sharedp[tile_x+6] +
temp77*sharedp[tile_x+7] + temp87*sharedp[tile_x+8] + temp97*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+8], temp8*sharedp[tile_x] + temp18*sharedp[tile_x+1] + temp28*sharedp[tile_x+2] + temp38*sharedp[tile_x+3] +
temp48*sharedp[tile_x+4] + temp58*sharedp[tile_x+5] + temp68*sharedp[tile_x+6] +
temp78*sharedp[tile_x+7] + temp88*sharedp[tile_x+8] + temp98*sharedp[tile_x+9]);
atomicAdd(&sharedap[tile_y+9], temp9*sharedp[tile_x] + temp19*sharedp[tile_x+1] + temp29*sharedp[tile_x+2] + temp39*sharedp[tile_x+3] +
temp49*sharedp[tile_x+4] + temp59*sharedp[tile_x+5] + temp69*sharedp[tile_x+6] +
temp79*sharedp[tile_x+7] + temp89*sharedp[tile_x+8] + temp99*sharedp[tile_x+9]);
if(tile_x!=tile_y){
atomicAdd(&sharedap[tile_x], temp0*sharedp[tile_y] + temp1*sharedp[tile_y + 1] + temp2*sharedp[tile_y + 2] + temp3*sharedp[tile_y + 3] +
temp4*sharedp[tile_y + 4] + temp5*sharedp[tile_y + 5] + temp6*sharedp[tile_y + 6] + temp7*sharedp[tile_y + 7] +
temp8*sharedp[tile_y + 8] + temp9*sharedp[tile_y + 9]);
atomicAdd(&sharedap[tile_x+1], temp10*sharedp[tile_y] + temp11*sharedp[tile_y+1] + temp12*sharedp[tile_y+2] + temp13*sharedp[tile_y+3] +
temp14*sharedp[tile_y+4] + temp15*sharedp[tile_y+5] + temp16*sharedp[tile_y+6] +
temp17*sharedp[tile_y+7] + temp18*sharedp[tile_y+8] + temp19*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+2], temp20*sharedp[tile_y] + temp21*sharedp[tile_y+1] + temp22*sharedp[tile_y+2] + temp23*sharedp[tile_y+3] +
temp24*sharedp[tile_y+4] + temp25*sharedp[tile_y+5] + temp26*sharedp[tile_y+6] +
temp27*sharedp[tile_y+7] + temp28*sharedp[tile_y+8] + temp29*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+3], temp30*sharedp[tile_y] + temp31*sharedp[tile_y+1] + temp32*sharedp[tile_y+2] + temp33*sharedp[tile_y+3] +
temp34*sharedp[tile_y+4] + temp35*sharedp[tile_y+5] + temp36*sharedp[tile_y+6] +
temp37*sharedp[tile_y+7] + temp38*sharedp[tile_y+8] + temp39*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+4], temp40*sharedp[tile_y] + temp41*sharedp[tile_y+1] + temp42*sharedp[tile_y+2] + temp43*sharedp[tile_y+3] +
temp44*sharedp[tile_y+4] + temp45*sharedp[tile_y+5] + temp46*sharedp[tile_y+6] +
temp47*sharedp[tile_y+7] + temp48*sharedp[tile_y+8] + temp49*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+5], temp50*sharedp[tile_y] + temp51*sharedp[tile_y+1] + temp52*sharedp[tile_y+2] + temp53*sharedp[tile_y+3] +
temp54*sharedp[tile_y+4] + temp55*sharedp[tile_y+5] + temp56*sharedp[tile_y+6] +
temp57*sharedp[tile_y+7] + temp58*sharedp[tile_y+8] + temp59*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+6], temp60*sharedp[tile_y] + temp61*sharedp[tile_y+1] + temp62*sharedp[tile_y+2] + temp63*sharedp[tile_y+3] +
temp64*sharedp[tile_y+4] + temp65*sharedp[tile_y+5] + temp66*sharedp[tile_y+6] +
temp67*sharedp[tile_y+7] + temp68*sharedp[tile_y+8] + temp69*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+7], temp70*sharedp[tile_y] + temp71*sharedp[tile_y+1] + temp72*sharedp[tile_y+2] + temp73*sharedp[tile_y+3] +
temp74*sharedp[tile_y+4] + temp75*sharedp[tile_y+5] + temp76*sharedp[tile_y+6] +
temp77*sharedp[tile_y+7] + temp78*sharedp[tile_y+8] + temp79*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+8], temp80*sharedp[tile_y] + temp81*sharedp[tile_y+1] + temp82*sharedp[tile_y+2] + temp83*sharedp[tile_y+3] +
temp84*sharedp[tile_y+4] + temp85*sharedp[tile_y+5] + temp86*sharedp[tile_y+6] +
temp87*sharedp[tile_y+7] + temp88*sharedp[tile_y+8] + temp89*sharedp[tile_y+9]);
atomicAdd(&sharedap[tile_x+9], temp90*sharedp[tile_y] + temp91*sharedp[tile_y+1] + temp92*sharedp[tile_y+2] + temp93*sharedp[tile_y+3] +
temp94*sharedp[tile_y+4] + temp95*sharedp[tile_y+5] + temp96*sharedp[tile_y+6] +
temp97*sharedp[tile_y+7] + temp98*sharedp[tile_y+8] + temp99*sharedp[tile_y+9]);
}
}
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("----------CG iteration %d \n", iter);
printf("***ap:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n\n");
printf("***shared memory content before 2rd blockReduceSum:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedp[i]);
printf("\n\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedr[i]);
printf("\n\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n\n");
}
#endif
if(threadIdx.x == 0){
rsnew[0] = 0;
}
//no need to have sync before blockReduce
//because there is a __syncthreads() in blockReduce
//pAp=p'*Ap
temp = 0;
for(int k = threadIdx.x; k < F; k += 64)
temp += sharedp[k]*sharedap[k];
//temp = blockReduceSum(shared, temp);
blockReduceSumWithAtomics(rsnew, temp);
			//sync needed, to let all atomicAdd threads complete
__syncthreads();
if(threadIdx.x == 0){
//pAp = temp;
//alpha=rsold/(p'*Ap); use rsnew to store pAp
alpha[0] = rsold[0]/rsnew[0];
#ifdef DEBUG
if(blockIdx.x==0){
printf("***rsold:\n");
printf("rsold = %f \n", rsold[0]);
printf("***pAp:\n");
printf("pAp = %f \n", rsnew[0]);
printf("***alpha:\n");
printf("alpha = %f \n", alpha[0]);
}
#endif
rsnew[0] = 0;
}
			//needed, alpha[0] to be used by all threads
__syncthreads();
for(int k = threadIdx.x; k < F; k += 64){
//x=x+alpha*p;
sharedx[k] = sharedx[k] + alpha[0] * sharedp[k];
//r=r-alpha*Ap;
sharedr[k] = sharedr[k] - alpha[0] * sharedap[k];
//NOT needed?
//__syncthreads();
}
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***shared memory content before 3rd blockReduceSum:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
//rsnew=r'*r;
temp = 0;
for(int k = threadIdx.x; k < F; k += 64)
temp += sharedr[k]*sharedr[k];
blockReduceSumWithAtomics(rsnew, temp);
//WARN: has to have this sync!
__syncthreads();
#ifdef DEBUG
if(blockIdx.x==0 && threadIdx.x==0){
printf("***rsnew:\n");
printf("rsnew = %f \n", rsnew[0]);
printf("***shared memory content after 3rd blockReduceSum:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
#endif
if(rsnew[0]<CG_ERROR)
break;
//NOT needed?
//__syncthreads();
//beta
if(threadIdx.x == 0){
beta[0] = rsnew[0]/rsold[0];
//rsold=rsnew;
rsold[0] = rsnew[0];
}
//need sync since every thread needs beta[0]
__syncthreads();
//p=r+(rsnew/rsold)*p;
for(int k = threadIdx.x; k < F; k += 64)
sharedp[k] = sharedr[k] + beta[0] * sharedp[k];
//need sync as every thread needs sharedp at the beginning of for
__syncthreads();
#ifdef DEBUG
__syncthreads();
if(blockIdx.x==0 && threadIdx.x==0){
printf("***shared memory content after update p:\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedp[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedr[i]);
printf("\n");
for(int i = 0; i < F; i++)
printf("%f ", sharedap[i]);
printf("\n");
}
__syncthreads();
#endif
}//end of CG iterations
//x<--sharedx
for(int k = threadIdx.x; k < F; k += 64)
XT[blockIdx.x*F + k] = sharedx[k];
//*/
}
}
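//host-side launcher: one 64-thread block per row of XT to update (m blocks), with
//SCAN_BATCH * F/2 float2 of dynamic shared memory used to stage thetaT columns per iteration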
void alsUpdateFeature100Host(const int batch_offset,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float* __restrict__ thetaT, float* XT, float* ythetaT, int cgIter){
alsUpdateFeature100<<<m, 64, SCAN_BATCH * F/2*sizeof(float2)>>>
(batch_offset, csrRowIndex, csrColIndex, lambda, m, F, thetaT, XT, ythetaT, cgIter);
cudaDeviceSynchronize();
cudaCheckError();
} |
5a5c448251ab30a489cdc35bbb164446c0267fa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
//#include <torch/tensor.h>
#include <ATen/ATen.h>
#include <ATen/Functions.h>
#include <ATen/hip/HIPContext.h>
#include "common.h"
#include "device_tensor.h"
namespace {
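//KD2Op + Encoding_Dist_Forward_kernel: for every (batch b, input i, codeword k) compute
//KD[b][i][k] = sum_d ((X[b][i][d] - C[k][d]) / STD[k][d])^2, the squared distance of
//input i to codeword k scaled per dimension by STD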
template<typename DType, typename Acctype>
struct KD2Op {
__device__ KD2Op(DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : X(x), C(c), STD(std) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d)
{
DType r = (X[b][i][d] - C[k][d]) / STD[k][d];
return ScalarConvert<DType, Acctype>::to(r * r);
}
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
};
template<typename DType, typename Acctype>
__global__ void Encoding_Dist_Forward_kernel (
DeviceTensor<DType, 3> KD,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
/* declarations of the variables */
int b, k, i, D;
/* Get the index and channels */
b = blockIdx.z;
k = blockIdx.x;
i = blockIdx.y;
D = X.getSize(2);
/* main operation */
KD2Op<DType, Acctype> g(X, C, STD);
  KD[b][i][k] = reduceD<Acctype>(g, b, i, k, D);
}
template<typename DType, typename Acctype>
struct EncGradXOp {
__device__ EncGradXOp(
DeviceTensor<DType, 3> gkd,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : GKD(gkd), X(x), C(c), STD(std) {}
// DeviceTensor<DType, 1> s, S(s)
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(
2 * GKD[b][i][k] * (X[b][i][d] - C[k][d]) /
(STD[k][d] * STD[k][d]));
}
DeviceTensor<DType, 3> GKD;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
// DeviceTensor<DType, 1> S;
};
template<typename DType, typename Acctype>
__global__ void Encoding_GradX_kernel (
DeviceTensor<DType, 3> GKD,
DeviceTensor<DType, 3> GX,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
// DeviceTensor<DType, 1> S
/* declarations of the variables */
int b, d, i, K;
/* Get the index and channels */
b = blockIdx.z;
i = blockIdx.y;
d = blockIdx.x;
K = C.getSize(0);
/* main operation */
EncGradXOp<DType, Acctype> g(GKD, X, C, STD);
GX[b][i][d] = reduceK<Acctype>(g, b, i, d, K);
}
template<typename DType, typename Acctype>
struct EncGradSTDOp {
__device__ EncGradSTDOp(
DeviceTensor<DType, 3> gkd,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : GKD(gkd), X(x), C(c), STD(std) {}
// DeviceTensor<DType, 1> s, S(s)
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(
-2 * GKD[b][i][k] * (X[b][i][d] - C[k][d]) *
(X[b][i][d] - C[k][d]) / (STD[k][d] * STD[k][d] * STD[k][d]));
}
DeviceTensor<DType, 3> GKD;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
// DeviceTensor<DType, 1> S;
};
template<typename DType, typename Acctype>
__global__ void Encoding_GradCSTD_kernel (
DeviceTensor<DType, 3> GKD,
DeviceTensor<DType, 2> GC,
DeviceTensor<DType, 2> GSTD,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
/* declarations of the variables */
int k, d, B, N;
/* Get the index and channels */
d = blockIdx.x;
k = blockIdx.y;
B = X.getSize(0);
N = X.getSize(1);
/* main operation */
EncGradXOp<DType, Acctype> g1(GKD, X, C, STD);
EncGradSTDOp<DType, Acctype> g2(GKD, X, C, STD);
GC[k][d] = -reduceBN<Acctype>(g1, k, d, B, N);
GSTD[k][d] += reduceBN<Acctype>(g2, k, d, B, N);
}
template<typename DType, typename Acctype>
struct EncGradSTDXOp {
__device__ EncGradSTDXOp(
DeviceTensor<DType, 2> gstd,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : GSTD(gstd), X(x), C(c), STD(std) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(
GSTD[k][d] * (X[b][i][d] - C[k][d]) / STD[k][d]);
}
DeviceTensor<DType, 2> GSTD;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
};
template<typename DType, typename Acctype>
__global__ void Encoding_GradSTDX_kernel (
DeviceTensor<DType, 2> GSTD,
DeviceTensor<DType, 3> GX,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD,
int N) {
/* declarations of the variables */
int b, d, i, K;
/* Get the index and channels */
b = blockIdx.z;
i = blockIdx.y;
d = blockIdx.x;
K = C.getSize(0);
/* main operation */
EncGradSTDXOp<DType, Acctype> g(GSTD, X, C, STD);
GX[b][i][d] += reduceK<Acctype>(g, b, i, d, K) / N;
}
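//AggOpV2 + AggregateV2_Forward_kernel: aggregate the scaled residuals weighted by the
//assignment matrix A, i.e. E[b][k][d] = sum_i A[b][i][k] * (X[b][i][d] - C[k][d]) / STD[k][d]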
template<typename DType, typename Acctype>
struct AggOpV2 {
__device__ AggOpV2(DeviceTensor<DType, 3> a,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : A(a), X(x), C(c), STD(std) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(A[b][i][k] * (X[b][i][d] - C[k][d]) /
STD[k][d]);
}
DeviceTensor<DType, 3> A;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
};
template<typename DType, typename Acctype>
__global__ void AggregateV2_Forward_kernel (
DeviceTensor<DType, 3> E,
DeviceTensor<DType, 3> A,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
/* declarations of the variables */
int b, k, d, N;
/* Get the index and channels */
b = blockIdx.z;
d = blockIdx.x;
k = blockIdx.y;
N = X.getSize(1);
/* main operation */
AggOpV2<DType, Acctype> g(A, X, C, STD);
E[b][k][d] = reduceN<Acctype>(g, b, k, d, N);
}
template<typename DType, typename Acctype>
struct AggV2BackOp {
__device__ AggV2BackOp(DeviceTensor<DType, 3> g,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : G(g), X(x), C(c), STD(std) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(G[b][k][d] * (X[b][i][d] - C[k][d]) /
STD[k][d]);
}
DeviceTensor<DType, 3> G;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
};
template<typename DType, typename Acctype>
__global__ void AggregateV2_Backward_kernel (
DeviceTensor<DType, 3> GA,
DeviceTensor<DType, 3> GE,
DeviceTensor<DType, 3> A,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
/* declarations of the variables */
int b, k, i, D;
/* Get the index and channels */
b = blockIdx.z;
i = blockIdx.y;
k = blockIdx.x;
D = GE.getSize(2);
/* main operation */
AggV2BackOp<DType, Acctype> g(GE, X, C, STD);
GA[b][i][k] = reduceD<Acctype>(g, b, i, k, D);
}
} // namespace
at::Tensor Encoding_Dist_Inference_Forward_CUDA(
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
// const at::Tensor S_,
// X \in R^{B, N, D}, C \in R^{K, D}, S \in R^K
auto KD_ = at::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options());
// E(x), E(x^2)
int N = X_.size(0) * X_.size(1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
// calculate the kernel distance
AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Inference_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> KD = devicetensor<scalar_t, 3>(KD_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
/* kernel function */
hipLaunchKernelGGL(( Encoding_Dist_Forward_kernel<scalar_t, scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, KD, X, C, STD);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return KD_;
}
std::vector<at::Tensor> Encoding_Dist_Inference_Backward_CUDA(
const at::Tensor GKD_,
const at::Tensor KD_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
auto GX_ = at::zeros_like(X_);
auto GC_ = at::zeros_like(C_);
auto GSTD_ = at::zeros_like(STD_);
/* kernel function */
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks1(X_.size(2), X_.size(1), X_.size(0));
dim3 threads1(getNumThreads(C_.size(0)));
dim3 blocks2(C_.size(1), C_.size(0));
dim3 threads2(getNumThreads(X_.size(1)));
int N = X_.size(0) * X_.size(1);
AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GKD = devicetensor<scalar_t, 3>(GKD_);
DeviceTensor<scalar_t, 2> GSTD = devicetensor<scalar_t, 2>(GSTD_);
DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_);
DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
hipLaunchKernelGGL(( Encoding_GradX_kernel<scalar_t, scalar_t>)
, dim3(blocks1), dim3(threads1), 0, stream, GKD, GX, X, C, STD);
AT_ASSERT(hipGetLastError() == hipSuccess);
hipLaunchKernelGGL(( Encoding_GradCSTD_kernel<scalar_t, scalar_t>)
, dim3(blocks2), dim3(threads2), 0, stream, GKD, GC, GSTD, X, C, STD);
AT_ASSERT(hipGetLastError() == hipSuccess);
}));
return {GX_, GC_, GSTD_};
}
std::vector<at::Tensor> Encoding_Dist_Forward_CUDA(
const at::Tensor X_,
const at::Tensor C_,
double eps) {
// const at::Tensor S_,
// X \in R^{B, N, D}, C \in R^{K, D}, S \in R^K
auto KD_ = at::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options());
// E(x), E(x^2)
int N = X_.size(0) * X_.size(1);
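  //per-dimension sum of squared residuals, expanded so it only needs sum(x) and sum(x^2):
  //SVar_[k][d] = sum_{b,i} (X[b][i][d] - C[k][d])^2
  //            = sum(x^2) - 2*C[k][d]*sum(x) + N*C[k][d]^2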
auto SVar_ = (X_.pow(2).sum(0).sum(0).view({1, X_.size(2)}) -
2 * C_ * X_.sum(0).sum(0).view({1, X_.size(2)})).expand_as(C_) +
C_.pow(2) * N;
auto STD_ = at::sqrt(SVar_ / N + eps);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
// calculate the kernel distance
AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> KD = devicetensor<scalar_t, 3>(KD_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
/* kernel function */
hipLaunchKernelGGL(( Encoding_Dist_Forward_kernel<scalar_t, scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, KD, X, C, STD);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {KD_, STD_, SVar_ / (N - 1)};
}
std::vector<at::Tensor> Encoding_Dist_Backward_CUDA(
const at::Tensor GKD_,
const at::Tensor GSTD_,
const at::Tensor KD_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
auto GX_ = at::zeros_like(X_);
auto GC_ = at::zeros_like(C_);
auto GSTD2_ = GSTD_.clone();
/* kernel function */
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks1(X_.size(2), X_.size(1), X_.size(0));
dim3 threads1(getNumThreads(C_.size(0)));
dim3 blocks2(C_.size(1), C_.size(0));
dim3 threads2(getNumThreads(X_.size(1)));
int N = X_.size(0) * X_.size(1);
AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GKD = devicetensor<scalar_t, 3>(GKD_);
DeviceTensor<scalar_t, 2> GSTD = devicetensor<scalar_t, 2>(GSTD2_);
DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_);
DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
hipLaunchKernelGGL(( Encoding_GradX_kernel<scalar_t, scalar_t>)
, dim3(blocks1), dim3(threads1), 0, stream, GKD, GX, X, C, STD);
AT_ASSERT(hipGetLastError() == hipSuccess);
hipLaunchKernelGGL(( Encoding_GradCSTD_kernel<scalar_t, scalar_t>)
, dim3(blocks2), dim3(threads2), 0, stream, GKD, GC, GSTD, X, C, STD);
AT_ASSERT(hipGetLastError() == hipSuccess);
hipLaunchKernelGGL(( Encoding_GradSTDX_kernel<scalar_t, scalar_t>)
, dim3(blocks1), dim3(threads1), 0, stream, GSTD, GX, X, C, STD, N);
AT_ASSERT(hipGetLastError() == hipSuccess);
}));
// d_sigma/d_c
GC_ = GC_ - GSTD2_ * (X_.mean(0).mean(0) - C_) / STD_;
return {GX_, GC_};
}
at::Tensor AggregateV2_Forward_CUDA(
const at::Tensor A_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
/* Device tensors */
auto E_ = at::zeros({A_.size(0), C_.size(0), C_.size(1)}, A_.options());
// auto IS_ = 1.0f / (S_ + eps).sqrt();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// B, K, D
dim3 blocks(C_.size(1), C_.size(0), X_.size(0));
dim3 threads(getNumThreads(X_.size(1)));
AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Forward_CUDA", ([&] {
DeviceTensor<scalar_t, 3> E = devicetensor<scalar_t, 3>(E_);
DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
/* kernel function */
hipLaunchKernelGGL(( AggregateV2_Forward_kernel<scalar_t, scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, E, A, X, C, STD);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return E_;
}
std::vector<at::Tensor> AggregateV2_Backward_CUDA(
const at::Tensor GE_,
const at::Tensor E_,
const at::Tensor A_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
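  //gradA_ needs a reduction over D and is filled by the kernel launched below; the
  //gradients w.r.t. X, C and STD follow in closed form from
  //E[b][k][d] = sum_i A[b][i][k]*(X[b][i][d]-C[k][d])/STD[k][d] and use plain tensor ops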
auto gradA_ = at::zeros_like(A_);
auto gradX_ = at::bmm(A_ , (GE_ / STD_.unsqueeze(0)));
auto gradC_ = -(A_.sum(1).unsqueeze(2) * GE_ / STD_.unsqueeze(0)).sum(0);
auto gradSTD_ = -(GE_ * E_).sum(0) / STD_;
// auto gradS_ = -0.5 * (GE_ * E_).sum(2).sum(0) / S_;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// B, K, D
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GA = devicetensor<scalar_t, 3>(gradA_);
DeviceTensor<scalar_t, 3> GE = devicetensor<scalar_t, 3>(GE_);
DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
hipLaunchKernelGGL(( AggregateV2_Backward_kernel<scalar_t, scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, GA, GE, A, X, C, STD);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {gradA_, gradX_, gradC_, gradSTD_};
}
| 5a5c448251ab30a489cdc35bbb164446c0267fa2.cu | #include <vector>
//#include <torch/tensor.h>
#include <ATen/ATen.h>
#include <ATen/Functions.h>
#include <ATen/cuda/CUDAContext.h>
#include "common.h"
#include "device_tensor.h"
namespace {
template<typename DType, typename Acctype>
struct KD2Op {
__device__ KD2Op(DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : X(x), C(c), STD(std) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d)
{
DType r = (X[b][i][d] - C[k][d]) / STD[k][d];
return ScalarConvert<DType, Acctype>::to(r * r);
}
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
};
template<typename DType, typename Acctype>
__global__ void Encoding_Dist_Forward_kernel (
DeviceTensor<DType, 3> KD,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
/* declarations of the variables */
int b, k, i, D;
/* Get the index and channels */
b = blockIdx.z;
k = blockIdx.x;
i = blockIdx.y;
D = X.getSize(2);
/* main operation */
KD2Op<DType, Acctype> g(X, C, STD);
  KD[b][i][k] = reduceD<Acctype>(g, b, i, k, D);
}
template<typename DType, typename Acctype>
struct EncGradXOp {
__device__ EncGradXOp(
DeviceTensor<DType, 3> gkd,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : GKD(gkd), X(x), C(c), STD(std) {}
// DeviceTensor<DType, 1> s, S(s)
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(
2 * GKD[b][i][k] * (X[b][i][d] - C[k][d]) /
(STD[k][d] * STD[k][d]));
}
DeviceTensor<DType, 3> GKD;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
// DeviceTensor<DType, 1> S;
};
template<typename DType, typename Acctype>
__global__ void Encoding_GradX_kernel (
DeviceTensor<DType, 3> GKD,
DeviceTensor<DType, 3> GX,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
// DeviceTensor<DType, 1> S
/* declarations of the variables */
int b, d, i, K;
/* Get the index and channels */
b = blockIdx.z;
i = blockIdx.y;
d = blockIdx.x;
K = C.getSize(0);
/* main operation */
EncGradXOp<DType, Acctype> g(GKD, X, C, STD);
GX[b][i][d] = reduceK<Acctype>(g, b, i, d, K);
}
template<typename DType, typename Acctype>
struct EncGradSTDOp {
__device__ EncGradSTDOp(
DeviceTensor<DType, 3> gkd,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : GKD(gkd), X(x), C(c), STD(std) {}
// DeviceTensor<DType, 1> s, S(s)
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(
-2 * GKD[b][i][k] * (X[b][i][d] - C[k][d]) *
(X[b][i][d] - C[k][d]) / (STD[k][d] * STD[k][d] * STD[k][d]));
}
DeviceTensor<DType, 3> GKD;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
// DeviceTensor<DType, 1> S;
};
template<typename DType, typename Acctype>
__global__ void Encoding_GradCSTD_kernel (
DeviceTensor<DType, 3> GKD,
DeviceTensor<DType, 2> GC,
DeviceTensor<DType, 2> GSTD,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
/* declarations of the variables */
int k, d, B, N;
/* Get the index and channels */
d = blockIdx.x;
k = blockIdx.y;
B = X.getSize(0);
N = X.getSize(1);
/* main operation */
EncGradXOp<DType, Acctype> g1(GKD, X, C, STD);
EncGradSTDOp<DType, Acctype> g2(GKD, X, C, STD);
GC[k][d] = -reduceBN<Acctype>(g1, k, d, B, N);
GSTD[k][d] += reduceBN<Acctype>(g2, k, d, B, N);
}
template<typename DType, typename Acctype>
struct EncGradSTDXOp {
__device__ EncGradSTDXOp(
DeviceTensor<DType, 2> gstd,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : GSTD(gstd), X(x), C(c), STD(std) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(
GSTD[k][d] * (X[b][i][d] - C[k][d]) / STD[k][d]);
}
DeviceTensor<DType, 2> GSTD;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
};
template<typename DType, typename Acctype>
__global__ void Encoding_GradSTDX_kernel (
DeviceTensor<DType, 2> GSTD,
DeviceTensor<DType, 3> GX,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD,
int N) {
/* declarations of the variables */
int b, d, i, K;
/* Get the index and channels */
b = blockIdx.z;
i = blockIdx.y;
d = blockIdx.x;
K = C.getSize(0);
/* main operation */
EncGradSTDXOp<DType, Acctype> g(GSTD, X, C, STD);
GX[b][i][d] += reduceK<Acctype>(g, b, i, d, K) / N;
}
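// Aggregation operator: E[b][k][d] accumulates the assignment-weighted, STD-normalised
// residuals A[b][i][k] * (X[b][i][d] - C[k][d]) / STD[k][d] over the N input descriptors.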
template<typename DType, typename Acctype>
struct AggOpV2 {
__device__ AggOpV2(DeviceTensor<DType, 3> a,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : A(a), X(x), C(c), STD(std) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(A[b][i][k] * (X[b][i][d] - C[k][d]) /
STD[k][d]);
}
DeviceTensor<DType, 3> A;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
};
template<typename DType, typename Acctype>
__global__ void AggregateV2_Forward_kernel (
DeviceTensor<DType, 3> E,
DeviceTensor<DType, 3> A,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
/* declarations of the variables */
int b, k, d, N;
/* Get the index and channels */
b = blockIdx.z;
d = blockIdx.x;
k = blockIdx.y;
N = X.getSize(1);
/* main operation */
AggOpV2<DType, Acctype> g(A, X, C, STD);
E[b][k][d] = reduceN<Acctype>(g, b, k, d, N);
}
template<typename DType, typename Acctype>
struct AggV2BackOp {
__device__ AggV2BackOp(DeviceTensor<DType, 3> g,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 2> std) : G(g), X(x), C(c), STD(std) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(G[b][k][d] * (X[b][i][d] - C[k][d]) /
STD[k][d]);
}
DeviceTensor<DType, 3> G;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 2> STD;
};
template<typename DType, typename Acctype>
__global__ void AggregateV2_Backward_kernel (
DeviceTensor<DType, 3> GA,
DeviceTensor<DType, 3> GE,
DeviceTensor<DType, 3> A,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 2> STD) {
/* declarations of the variables */
int b, k, i, D;
/* Get the index and channels */
b = blockIdx.z;
i = blockIdx.y;
k = blockIdx.x;
D = GE.getSize(2);
/* main operation */
AggV2BackOp<DType, Acctype> g(GE, X, C, STD);
GA[b][i][k] = reduceD<Acctype>(g, b, i, k, D);
}
} // namespace
at::Tensor Encoding_Dist_Inference_Forward_CUDA(
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
// const at::Tensor S_,
// X \in R^{B, N, D}, C \in R^{K, D}, S \in R^K
auto KD_ = at::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options());
// E(x), E(x^2)
int N = X_.size(0) * X_.size(1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
// calculate the kernel distance
AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Inference_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> KD = devicetensor<scalar_t, 3>(KD_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
/* kernel function */
Encoding_Dist_Forward_kernel<scalar_t, scalar_t>
<<<blocks, threads, 0, stream>>> (KD, X, C, STD);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return KD_;
}
std::vector<at::Tensor> Encoding_Dist_Inference_Backward_CUDA(
const at::Tensor GKD_,
const at::Tensor KD_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
auto GX_ = at::zeros_like(X_);
auto GC_ = at::zeros_like(C_);
auto GSTD_ = at::zeros_like(STD_);
/* kernel function */
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks1(X_.size(2), X_.size(1), X_.size(0));
dim3 threads1(getNumThreads(C_.size(0)));
dim3 blocks2(C_.size(1), C_.size(0));
dim3 threads2(getNumThreads(X_.size(1)));
int N = X_.size(0) * X_.size(1);
AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GKD = devicetensor<scalar_t, 3>(GKD_);
DeviceTensor<scalar_t, 2> GSTD = devicetensor<scalar_t, 2>(GSTD_);
DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_);
DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
Encoding_GradX_kernel<scalar_t, scalar_t>
<<<blocks1, threads1, 0, stream>>> (GKD, GX, X, C, STD);
AT_ASSERT(cudaGetLastError() == cudaSuccess);
Encoding_GradCSTD_kernel<scalar_t, scalar_t>
<<<blocks2, threads2, 0, stream>>> (GKD, GC, GSTD, X, C, STD);
AT_ASSERT(cudaGetLastError() == cudaSuccess);
}));
return {GX_, GC_, GSTD_};
}
std::vector<at::Tensor> Encoding_Dist_Forward_CUDA(
const at::Tensor X_,
const at::Tensor C_,
double eps) {
// const at::Tensor S_,
// X \in R^{B, N, D}, C \in R^{K, D}, S \in R^K
auto KD_ = at::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options());
// E(x), E(x^2)
int N = X_.size(0) * X_.size(1);
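// Scatter around each codeword, expanded algebraically so only global sums over all
// B*N samples are needed: sum_i (x_i - c)^2 = sum_i x_i^2 - 2 c sum_i x_i + N c^2,
// evaluated independently for every (codeword, dimension) pair; STD = sqrt(SVar / N + eps).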
auto SVar_ = (X_.pow(2).sum(0).sum(0).view({1, X_.size(2)}) -
2 * C_ * X_.sum(0).sum(0).view({1, X_.size(2)})).expand_as(C_) +
C_.pow(2) * N;
auto STD_ = at::sqrt(SVar_ / N + eps);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
// calculate the kernel distance
AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> KD = devicetensor<scalar_t, 3>(KD_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
/* kernel function */
Encoding_Dist_Forward_kernel<scalar_t, scalar_t>
<<<blocks, threads, 0, stream>>> (KD, X, C, STD);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {KD_, STD_, SVar_ / (N - 1)};
}
std::vector<at::Tensor> Encoding_Dist_Backward_CUDA(
const at::Tensor GKD_,
const at::Tensor GSTD_,
const at::Tensor KD_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
auto GX_ = at::zeros_like(X_);
auto GC_ = at::zeros_like(C_);
auto GSTD2_ = GSTD_.clone();
/* kernel function */
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks1(X_.size(2), X_.size(1), X_.size(0));
dim3 threads1(getNumThreads(C_.size(0)));
dim3 blocks2(C_.size(1), C_.size(0));
dim3 threads2(getNumThreads(X_.size(1)));
int N = X_.size(0) * X_.size(1);
AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GKD = devicetensor<scalar_t, 3>(GKD_);
DeviceTensor<scalar_t, 2> GSTD = devicetensor<scalar_t, 2>(GSTD2_);
DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_);
DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
Encoding_GradX_kernel<scalar_t, scalar_t>
<<<blocks1, threads1, 0, stream>>> (GKD, GX, X, C, STD);
AT_ASSERT(cudaGetLastError() == cudaSuccess);
Encoding_GradCSTD_kernel<scalar_t, scalar_t>
<<<blocks2, threads2, 0, stream>>> (GKD, GC, GSTD, X, C, STD);
AT_ASSERT(cudaGetLastError() == cudaSuccess);
Encoding_GradSTDX_kernel<scalar_t, scalar_t>
<<<blocks1, threads1, 0, stream>>> (GSTD, GX, X, C, STD, N);
AT_ASSERT(cudaGetLastError() == cudaSuccess);
}));
// d_sigma/d_c
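// sigma = sqrt(mean_i (x_i - c)^2 + eps) => d sigma / d c = (c - mean(x)) / sigma,
// so the chain-rule term added to GC is -GSTD * (mean(x) - c) / sigma.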
GC_ = GC_ - GSTD2_ * (X_.mean(0).mean(0) - C_) / STD_;
return {GX_, GC_};
}
at::Tensor AggregateV2_Forward_CUDA(
const at::Tensor A_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
/* Device tensors */
auto E_ = at::zeros({A_.size(0), C_.size(0), C_.size(1)}, A_.options());
// auto IS_ = 1.0f / (S_ + eps).sqrt();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// B, K, D
dim3 blocks(C_.size(1), C_.size(0), X_.size(0));
dim3 threads(getNumThreads(X_.size(1)));
AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Forward_CUDA", ([&] {
DeviceTensor<scalar_t, 3> E = devicetensor<scalar_t, 3>(E_);
DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
/* kernel function */
AggregateV2_Forward_kernel<scalar_t, scalar_t>
<<<blocks, threads, 0, stream>>>(E, A, X, C, STD);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return E_;
}
std::vector<at::Tensor> AggregateV2_Backward_CUDA(
const at::Tensor GE_,
const at::Tensor E_,
const at::Tensor A_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor STD_) {
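// Closed-form gradients of E[b][k][d] = sum_i A[b][i][k] * (X[b][i][d] - C[k][d]) / STD[k][d]:
// dL/dX = bmm(A, GE / STD), dL/dC = -sum_{b,i} A * GE / STD, dL/dSTD = -sum_b GE * E / STD;
// only dL/dA still needs the custom kernel launched below.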
auto gradA_ = at::zeros_like(A_);
auto gradX_ = at::bmm(A_ , (GE_ / STD_.unsqueeze(0)));
auto gradC_ = -(A_.sum(1).unsqueeze(2) * GE_ / STD_.unsqueeze(0)).sum(0);
auto gradSTD_ = -(GE_ * E_).sum(0) / STD_;
// auto gradS_ = -0.5 * (GE_ * E_).sum(2).sum(0) / S_;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// B, K, D
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GA = devicetensor<scalar_t, 3>(gradA_);
DeviceTensor<scalar_t, 3> GE = devicetensor<scalar_t, 3>(GE_);
DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_);
AggregateV2_Backward_kernel<scalar_t, scalar_t>
<<<blocks, threads, 0, stream>>> (GA, GE, A, X, C, STD);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {gradA_, gradX_, gradC_, gradSTD_};
}
|
2e80b04d3c636ae1304bbd51836378d31d992f83.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amgx_types/util.h>
std::ostream &operator<<(std::ostream &os, const hipComplex &x)
{
os << amgx::types::get_re(x) << " " << amgx::types::get_im(x);
return os;
}
std::ostream &operator<<(std::ostream &os, const hipDoubleComplex &x)
{
os << amgx::types::get_re(x) << " " << amgx::types::get_im(x);
return os;
} | 2e80b04d3c636ae1304bbd51836378d31d992f83.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amgx_types/util.h>
std::ostream &operator<<(std::ostream &os, const cuComplex &x)
{
os << amgx::types::get_re(x) << " " << amgx::types::get_im(x);
return os;
}
std::ostream &operator<<(std::ostream &os, const cuDoubleComplex &x)
{
os << amgx::types::get_re(x) << " " << amgx::types::get_im(x);
return os;
} |
926387f7445b053b34faeeb8a7bbe0dba153fd29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
// computeSum is used for two different things. v hacky
// templated by function to how to scan
template <typename T>
__global__
void hillis_steele_scan_kernel(const T * const d_arr, T * const d_out, const size_t N,
const size_t binMask, const bool computeSum)
{
extern __shared__ T temp[];
int bdx = blockDim.x;
int tid = threadIdx.x;
int x = blockIdx.x * bdx + tid;
if (x >= N)
return;
int in = 1;
int out = 0;
// need to make this exclusive scan
temp[tid] = computeSum ? (((d_arr[x] & binMask) == 0) ? 1 : 0) : d_arr[x];
__syncthreads();
int s = 1;
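// Hillis-Steele inclusive scan in shared memory: temp is double-buffered (an "in" half and
// an "out" half, each blockDim.x wide) and the stride s doubles on every pass.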
while (s < bdx) { // used to be N
in = out;
out = 1 - in;
temp[out * bdx + tid] = temp[in * bdx + tid] + ((tid >= s) ? temp[in * bdx + tid - s] : 0);
__syncthreads();
s <<= 1;
}
d_out[x] = temp[out * bdx + tid];
// fill in block sums
if (computeSum)
if (threadIdx.x == blockDim.x - 1)
d_out[N + blockIdx.x] = temp[out * bdx + tid];
}
template <typename T>
__global__
void add_sums_to_scan_kernel(T * const d_arr, const T * const d_block_sums, int N)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= N)
return;
d_arr[x] += d_block_sums[blockIdx.x];
}
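// Host driver: (1) scan each block and append the per-block totals after the first N
// elements of d_newPos, (2) scan those totals into d_block_sums (with a host-side pass
// when more than 1024 blocks are involved), (3) add each scanned total to every element
// of the block that follows it.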
template <class T>
void hillis_steele_scan(const T * const d_input, T * const d_newPos, T * const d_block_sums,
const size_t N, const size_t binMask, const size_t gridSize, const size_t blockSize)
{
hipLaunchKernelGGL(( hillis_steele_scan_kernel), dim3(gridSize), dim3(blockSize), 2 * blockSize * sizeof(T), 0, d_input, d_newPos, N, binMask, true);
if (gridSize > 1) {
if (gridSize > 1024) {
// block sums : 1 2 3 ... 1024 1 2 3 ... 1024
// form block scans over these to get
// 1 3 6 ... <..> 1 3 6 ... <..>
const size_t gridSize2 = round_up(gridSize, (size_t) 1024);
hipLaunchKernelGGL(( hillis_steele_scan_kernel), dim3(gridSize2), dim3(1024), 2 * 1024 * sizeof(T), 0, d_newPos + N, d_block_sums, gridSize, binMask, false);
T *h_block_sums = (T *) malloc(gridSize * sizeof(T));
checkCudaErrors(hipMemcpy(h_block_sums, d_block_sums, gridSize * sizeof(T), hipMemcpyDeviceToHost));
T prev = 0;
for (size_t i = 1024; i < gridSize; ++i) {
if (i % 1024 == 0)
prev = h_block_sums[i-1];
h_block_sums[i] += prev;
}
checkCudaErrors(hipMemcpy(d_block_sums, h_block_sums, gridSize * sizeof(T), hipMemcpyHostToDevice));
free(h_block_sums);
} else {
hipLaunchKernelGGL(( hillis_steele_scan_kernel), dim3(1), dim3(gridSize), 0, 0, d_newPos + N, d_block_sums, gridSize, binMask, false);
}
hipLaunchKernelGGL(( add_sums_to_scan_kernel), dim3(gridSize-1), dim3(blockSize), 0, 0, d_newPos + blockSize, d_block_sums, N - blockSize);
}
}
// initialize specific instantiations of hillis_steele_scan
template void hillis_steele_scan<ull>(const ull * const, ull * const, ull * const,
const size_t, const size_t, const size_t, const size_t); | 926387f7445b053b34faeeb8a7bbe0dba153fd29.cu | #include "utils.h"
// computeSum is used for two different things. v hacky
// templated by function to how to scan
template <typename T>
__global__
void hillis_steele_scan_kernel(const T * const d_arr, T * const d_out, const size_t N,
const size_t binMask, const bool computeSum)
{
extern __shared__ T temp[];
int bdx = blockDim.x;
int tid = threadIdx.x;
int x = blockIdx.x * bdx + tid;
if (x >= N)
return;
int in = 1;
int out = 0;
// need to make this exclusive scan
temp[tid] = computeSum ? (((d_arr[x] & binMask) == 0) ? 1 : 0) : d_arr[x];
__syncthreads();
int s = 1;
while (s < bdx) { // used to be N
in = out;
out = 1 - in;
temp[out * bdx + tid] = temp[in * bdx + tid] + ((tid >= s) ? temp[in * bdx + tid - s] : 0);
__syncthreads();
s <<= 1;
}
d_out[x] = temp[out * bdx + tid];
// fill in block sums
if (computeSum)
if (threadIdx.x == blockDim.x - 1)
d_out[N + blockIdx.x] = temp[out * bdx + tid];
}
template <typename T>
__global__
void add_sums_to_scan_kernel(T * const d_arr, const T * const d_block_sums, int N)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= N)
return;
d_arr[x] += d_block_sums[blockIdx.x];
}
template <class T>
void hillis_steele_scan(const T * const d_input, T * const d_newPos, T * const d_block_sums,
const size_t N, const size_t binMask, const size_t gridSize, const size_t blockSize)
{
hillis_steele_scan_kernel<<<gridSize, blockSize, 2 * blockSize * sizeof(T)>>>(d_input, d_newPos, N, binMask, true);
if (gridSize > 1) {
if (gridSize > 1024) {
// block sums : 1 2 3 ... 1024 1 2 3 ... 1024
// form block scans over these to get
// 1 3 6 ... <..> 1 3 6 ... <..>
const size_t gridSize2 = round_up(gridSize, (size_t) 1024);
hillis_steele_scan_kernel<<<gridSize2, 1024, 2 * 1024 * sizeof(T)>>>(d_newPos + N, d_block_sums, gridSize, binMask, false);
T *h_block_sums = (T *) malloc(gridSize * sizeof(T));
checkCudaErrors(cudaMemcpy(h_block_sums, d_block_sums, gridSize * sizeof(T), cudaMemcpyDeviceToHost));
T prev = 0;
for (size_t i = 1024; i < gridSize; ++i) {
if (i % 1024 == 0)
prev = h_block_sums[i-1];
h_block_sums[i] += prev;
}
checkCudaErrors(cudaMemcpy(d_block_sums, h_block_sums, gridSize * sizeof(T), cudaMemcpyHostToDevice));
free(h_block_sums);
} else {
hillis_steele_scan_kernel<<<1, gridSize>>>(d_newPos + N, d_block_sums, gridSize, binMask, false);
}
add_sums_to_scan_kernel<<<gridSize-1, blockSize>>>(d_newPos + blockSize, d_block_sums, N - blockSize);
}
}
// initialize specific instantiations of hillis_steele_scan
template void hillis_steele_scan<ull>(const ull * const, ull * const, ull * const,
const size_t, const size_t, const size_t, const size_t); |
3246c92efadafdd15cc5e42238809df28c1edf02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// added functions for sparse matrices
template <>
void caffe_gpu_nnz<float>(const int M, const int N, const float* A,
int* nnzPerRowColumn, int* nnzTotalDevHostPtr) {
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
CUSPARSE_CHECK(hipsparseSnnz(Caffe::cusparse_handle(), HIPSPARSE_DIRECTION_ROW,
M, N, descr, A, M, nnzPerRowColumn, nnzTotalDevHostPtr));
}
template <>
void caffe_gpu_nnz<double>(const int M, const int N, const double* A,
int* nnzPerRowColumn, int* nnzTotalDevHostPtr) {
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
CUSPARSE_CHECK(hipsparseDnnz(Caffe::cusparse_handle(), HIPSPARSE_DIRECTION_ROW,
M, N, descr, A, M, nnzPerRowColumn, nnzTotalDevHostPtr));
}
template <>
void caffe_gpu_dense2csr<float>(const int M, const int N, const float* A, const int* nnzPerRow,
float* csrValA, int* csrRowPtrA, int* csrColIndA) {
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
CUSPARSE_CHECK(hipsparseSdense2csr(Caffe::cusparse_handle(), M, N,
descr, A, M, nnzPerRow, csrValA, csrRowPtrA, csrColIndA));
}
template <>
void caffe_gpu_dense2csr<double>(const int M, const int N, const double* A, const int* nnzPerRow,
double* csrValA, int* csrRowPtrA, int* csrColIndA) {
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
CUSPARSE_CHECK(hipsparseDdense2csr(Caffe::cusparse_handle(), M, N,
descr, A, M, nnzPerRow, csrValA, csrRowPtrA, csrColIndA));
}
template <>
void caffe_gpu_csrmm<float>(const int M, const int N, const int K, const int nnz,
const float alpha, const float* csrValA, const int* csrRowPtrA, const int* csrColIndA,
const float* B, const int ldb, const float beta, float* C) {
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
CUSPARSE_CHECK(hipsparseScsrmm(Caffe::cusparse_handle(), HIPSPARSE_OPERATION_NON_TRANSPOSE,
M, N, K, nnz, &alpha, descr, csrValA, csrRowPtrA, csrColIndA,
B, ldb, &beta, C, M));
}
template <>
void caffe_gpu_csrmm<double>(const int M, const int N, const int K, const int nnz,
const double alpha, const double* csrValA, const int* csrRowPtrA, const int* csrColIndA,
const double* B, const int ldb, const double beta, double* C) {
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
CUSPARSE_CHECK(hipsparseDcsrmm(Caffe::cusparse_handle(), HIPSPARSE_OPERATION_NON_TRANSPOSE,
M, N, K, nnz, &alpha, descr, csrValA, csrRowPtrA, csrColIndA,
B, ldb, &beta, C, M));
}
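// cuBLAS/hipBLAS assume column-major storage, so the row-major product C = A * B is
// computed as C^T = B^T * A^T by swapping the operand order and leading dimensions.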
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 3246c92efadafdd15cc5e42238809df28c1edf02.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// added functions for sparse matrices
template <>
void caffe_gpu_nnz<float>(const int M, const int N, const float* A,
int* nnzPerRowColumn, int* nnzTotalDevHostPtr) {
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
CUSPARSE_CHECK(cusparseSnnz(Caffe::cusparse_handle(), CUSPARSE_DIRECTION_ROW,
M, N, descr, A, M, nnzPerRowColumn, nnzTotalDevHostPtr));
}
template <>
void caffe_gpu_nnz<double>(const int M, const int N, const double* A,
int* nnzPerRowColumn, int* nnzTotalDevHostPtr) {
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
CUSPARSE_CHECK(cusparseDnnz(Caffe::cusparse_handle(), CUSPARSE_DIRECTION_ROW,
M, N, descr, A, M, nnzPerRowColumn, nnzTotalDevHostPtr));
}
template <>
void caffe_gpu_dense2csr<float>(const int M, const int N, const float* A, const int* nnzPerRow,
float* csrValA, int* csrRowPtrA, int* csrColIndA) {
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
CUSPARSE_CHECK(cusparseSdense2csr(Caffe::cusparse_handle(), M, N,
descr, A, M, nnzPerRow, csrValA, csrRowPtrA, csrColIndA));
}
template <>
void caffe_gpu_dense2csr<double>(const int M, const int N, const double* A, const int* nnzPerRow,
double* csrValA, int* csrRowPtrA, int* csrColIndA) {
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
CUSPARSE_CHECK(cusparseDdense2csr(Caffe::cusparse_handle(), M, N,
descr, A, M, nnzPerRow, csrValA, csrRowPtrA, csrColIndA));
}
template <>
void caffe_gpu_csrmm<float>(const int M, const int N, const int K, const int nnz,
const float alpha, const float* csrValA, const int* csrRowPtrA, const int* csrColIndA,
const float* B, const int ldb, const float beta, float* C) {
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
CUSPARSE_CHECK(cusparseScsrmm(Caffe::cusparse_handle(), CUSPARSE_OPERATION_NON_TRANSPOSE,
M, N, K, nnz, &alpha, descr, csrValA, csrRowPtrA, csrColIndA,
B, ldb, &beta, C, M));
}
template <>
void caffe_gpu_csrmm<double>(const int M, const int N, const int K, const int nnz,
const double alpha, const double* csrValA, const int* csrRowPtrA, const int* csrColIndA,
const double* B, const int ldb, const double beta, double* C) {
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
CUSPARSE_CHECK(cusparseDcsrmm(Caffe::cusparse_handle(), CUSPARSE_OPERATION_NON_TRANSPOSE,
M, N, K, nnz, &alpha, descr, csrValA, csrRowPtrA, csrColIndA,
B, ldb, &beta, C, M));
}
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
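// curandGenerateUniform draws from (0.0, 1.0]; the scale/shift below maps the samples
// onto the requested interval (a, b].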
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
eafa5be49f92a547a2a7fe820cfea2ca545282d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* tex2d_addressing_device.cu
*
* Microdemo for 2D texturing from device memory.
*
* Build with: nvcc -I ../chLib <options> tex2d_addressing_device.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <float.h>
#include <assert.h>
#include <chError.h>
texture<float2, 2, hipReadModeElementType> tex;
extern "C" __global__ void
TexReadout( float4 *out, size_t Width, size_t Pitch, size_t Height, float2 base, float2 increment )
{
for ( int row = blockIdx.y*blockDim.y + threadIdx.y;
row < Height;
row += blockDim.y*gridDim.y )
{
float4 *outrow = (float4 *) ((char *) out+row*Pitch);
for ( int col = blockIdx.x*blockDim.x + threadIdx.x;
col < Width;
col += blockDim.x*gridDim.x )
{
float4 value;
float2 texvalue;
value.x = base.x+(float)col*increment.x;
value.y = base.y+(float)row*increment.y;
texvalue = tex2D( tex, value.x, value.y);
value.z = texvalue.x;
value.w = texvalue.y;
outrow[col] = value;
}
}
}
template<class T>
void
CreateAndPrintTex(
T *initTex,
size_t inWidth, size_t inHeight,
size_t outWidth, size_t outHeight,
float2 base, float2 increment,
hipTextureFilterMode filterMode,
hipTextureAddressMode addressModeX,
hipTextureAddressMode addressModeY )
{
T *texContents = 0;
T *texDevice = 0;
size_t texPitch;
float4 *outHost = 0, *outDevice = 0;
hipError_t status;
size_t outPitch;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<T>();
dim3 blocks, threads;
// use caller-provided array, if any, to initialize texture
if ( initTex ) {
texContents = initTex;
}
else {
// default is to initialize with identity elements
texContents = (T *) malloc( inWidth*inHeight*sizeof(T) );
if ( ! texContents )
goto Error;
for ( int row = 0; row < inHeight; row++ ) {
T *rowptr = texContents + row*inWidth;
for ( int col = 0; col < inWidth; col++ ) { // each row holds inWidth texels
T value;
value.x = (float) col;
value.y = (float) row;
rowptr[col] = value;
}
}
}
cuda(MallocPitch( &texDevice,
&texPitch,
inWidth*sizeof(T),
inHeight));
cuda(Memcpy2D( texDevice, texPitch,
texContents, inWidth*sizeof(T),
inWidth*sizeof(T),
inHeight,
hipMemcpyHostToDevice));
cuda(BindTexture2D( NULL,
&tex,
texDevice,
&channelDesc,
inWidth,
inHeight,
texPitch ));
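// Round the output row pitch up to the next 64-byte boundary so each row starts on an
// aligned address.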
outPitch = outWidth*sizeof(float4);
outPitch = (outPitch+0x3f)&~0x3f;
cuda(HostAlloc( (void **) &outHost, outHeight*outPitch, hipHostMallocMapped)); // outHeight rows of outPitch bytes
cuda(HostGetDevicePointer( (void **) &outDevice, outHost, 0 ));
tex.filterMode = filterMode;
tex.addressMode[0] = addressModeX;
tex.addressMode[1] = addressModeY;
blocks.x = 2;
blocks.y = 1;
threads.x = 64; threads.y = 4;
hipLaunchKernelGGL(( TexReadout), dim3(blocks),dim3(threads), 0, 0, outDevice, outWidth, outPitch, outHeight, base, increment );
cuda(ThreadSynchronize());
for ( int row = 0; row < outHeight; row++ ) {
float4 *outrow = (float4 *) ((char *) outHost + row*outPitch);
for ( int col = 0; col < outWidth; col++ ) {
printf( "(%.1f, %.1f) ", outrow[col].z, outrow[col].w );
}
printf( "\n" );
}
printf( "\n" );
Error:
if ( ! initTex ) free( texContents );
hipFree( texDevice );
hipHostFree( outHost );
}
int
main( int argc, char *argv[] )
{
int ret = 1;
hipError_t status;
cuda(SetDeviceFlags(hipDeviceMapHost));
cuda(Free(0));
// go through once each with linear and point filtering
do {
tex.normalized = false;
tex.filterMode = hipFilterModePoint;
tex.addressMode[0] = hipAddressModeClamp;
tex.addressMode[1] = hipAddressModeClamp;
float2 base, increment;
base.x = 0.0f;//-1.0f;
base.y = 0.0f;//-1.0f;
increment.x = 1.0f;
increment.y = 1.0f;
// CreateAndPrintTex<float2>( NULL, 8, 8, 8, 8, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] );
CreateAndPrintTex<float2>( NULL, 256, 256, 256, 256, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] );
} while ( tex.filterMode == hipFilterModeLinear );
ret = 0;
Error:
return ret;
}
| eafa5be49f92a547a2a7fe820cfea2ca545282d8.cu | /*
*
* tex2d_addressing_device.cu
*
* Microdemo for 2D texturing from device memory.
*
* Build with: nvcc -I ../chLib <options> tex2d_addressing_device.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <float.h>
#include <assert.h>
#include <chError.h>
texture<float2, 2, cudaReadModeElementType> tex;
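// Grid-stride readback kernel: each output element records the (x, y) coordinate that
// was sampled in .x/.y and the texel fetched from the texture in .z/.w.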
extern "C" __global__ void
TexReadout( float4 *out, size_t Width, size_t Pitch, size_t Height, float2 base, float2 increment )
{
for ( int row = blockIdx.y*blockDim.y + threadIdx.y;
row < Height;
row += blockDim.y*gridDim.y )
{
float4 *outrow = (float4 *) ((char *) out+row*Pitch);
for ( int col = blockIdx.x*blockDim.x + threadIdx.x;
col < Width;
col += blockDim.x*gridDim.x )
{
float4 value;
float2 texvalue;
value.x = base.x+(float)col*increment.x;
value.y = base.y+(float)row*increment.y;
texvalue = tex2D( tex, value.x, value.y);
value.z = texvalue.x;
value.w = texvalue.y;
outrow[col] = value;
}
}
}
template<class T>
void
CreateAndPrintTex(
T *initTex,
size_t inWidth, size_t inHeight,
size_t outWidth, size_t outHeight,
float2 base, float2 increment,
cudaTextureFilterMode filterMode,
cudaTextureAddressMode addressModeX,
cudaTextureAddressMode addressModeY )
{
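    // Allocates a pitched device array, fills it (identity texels by default), binds it as a
    // 2D texture, launches TexReadout over an outWidth x outHeight grid of coordinates, and
    // prints the texels that come back through mapped host memory.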
T *texContents = 0;
T *texDevice = 0;
size_t texPitch;
float4 *outHost = 0, *outDevice = 0;
cudaError_t status;
size_t outPitch;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<T>();
dim3 blocks, threads;
// use caller-provided array, if any, to initialize texture
if ( initTex ) {
texContents = initTex;
}
else {
// default is to initialize with identity elements
texContents = (T *) malloc( inWidth*inHeight*sizeof(T) );
if ( ! texContents )
goto Error;
for ( int row = 0; row < inHeight; row++ ) {
T *rowptr = texContents + row*inWidth;
for ( int col = 0; col < outHeight; col++ ) {
T value;
value.x = (float) col;
value.y = (float) row;
rowptr[col] = value;
}
}
}
cuda(MallocPitch( &texDevice,
&texPitch,
inWidth*sizeof(T),
inHeight));
cuda(Memcpy2D( texDevice, texPitch,
texContents, inWidth*sizeof(T),
inWidth*sizeof(T),
inHeight,
cudaMemcpyHostToDevice));
cuda(BindTexture2D( NULL,
&tex,
texDevice,
&channelDesc,
inWidth,
inHeight,
texPitch ));
outPitch = outWidth*sizeof(float4);
outPitch = (outPitch+0x3f)&~0x3f;
cuda(HostAlloc( (void **) &outHost, outWidth*outPitch, cudaHostAllocMapped));
cuda(HostGetDevicePointer( (void **) &outDevice, outHost, 0 ));
tex.filterMode = filterMode;
tex.addressMode[0] = addressModeX;
tex.addressMode[1] = addressModeY;
blocks.x = 2;
blocks.y = 1;
threads.x = 64; threads.y = 4;
TexReadout<<<blocks,threads>>>( outDevice, outWidth, outPitch, outHeight, base, increment );
cuda(ThreadSynchronize());
for ( int row = 0; row < outHeight; row++ ) {
float4 *outrow = (float4 *) ((char *) outHost + row*outPitch);
for ( int col = 0; col < outWidth; col++ ) {
printf( "(%.1f, %.1f) ", outrow[col].z, outrow[col].w );
}
printf( "\n" );
}
printf( "\n" );
Error:
if ( ! initTex ) free( texContents );
cudaFree( texDevice );
cudaFreeHost( outHost );
}
int
main( int argc, char *argv[] )
{
int ret = 1;
cudaError_t status;
cuda(SetDeviceFlags(cudaDeviceMapHost));
cuda(Free(0));
// go through once each with linear and point filtering
do {
tex.normalized = false;
tex.filterMode = cudaFilterModePoint;
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
float2 base, increment;
base.x = 0.0f;//-1.0f;
base.y = 0.0f;//-1.0f;
increment.x = 1.0f;
increment.y = 1.0f;
// CreateAndPrintTex<float2>( NULL, 8, 8, 8, 8, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] );
CreateAndPrintTex<float2>( NULL, 256, 256, 256, 256, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] );
} while ( tex.filterMode == cudaFilterModeLinear );
ret = 0;
Error:
return ret;
}
|
7d1b827a75805084e1945edc7db256efbfbc77a0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ElementwiseAddition
*
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0], prhs[1] := mxGPUArray or CPU Array}
* gpuArray output, C=ELA_CUDA(A,B, alpha, beta) C=A*alpha+B*beta.
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "ElementwiseAddition.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, const int line)
{
if (code != hipSuccess)
{
//fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), __FILE__, __LINE__);
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(code));
hipDeviceReset();
mexErrMsgIdAndTxt( "MATLAB:mexatexit:fatal", "check the memory and process usage");
}
}
template <unsigned int TILE_DIM > __global__ void MatAddSharedElementwise(float * A, float * B, float * C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows, int numCColumns, float alpha, float beta) {
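    // Computes C = alpha*A + beta*B tile by tile: each TILE_DIM x TILE_DIM tile of A and B is
    // staged in shared memory (already scaled by alpha and beta) before the element-wise sum.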
float CValue = 0.0;
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]*alpha;
//else As[threadIdx.y][threadIdx.x] = 10.0;
if (k*TILE_DIM + threadIdx.x < numBColumns && Row < numBRows) Bs[threadIdx.y][threadIdx.x] = B[Row*numBColumns + k*TILE_DIM + threadIdx.x]*beta;
//Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
//else Bs[threadIdx.y][threadIdx.x] = 50.0;
__syncthreads();
//for (int n = 0; n < TILE_DIM; ++n)
CValue = As[threadIdx.y][threadIdx.x] + Bs[threadIdx.y][threadIdx.x];
if (Row < numCRows && Col < numCColumns) C[Row*numBColumns + k*TILE_DIM + threadIdx.x] = CValue;
__syncthreads();
}
}
void ElementwiseAddition(float * A, float * B, float * C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows, int numCColumns, float alpha, float beta)
{
float * hostA = A; // The A matrix
float * hostB = B; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
float * deviceB;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
// test32
hipError_t error;
int devID = 0;
// get number of SMs on this GPU
error = hipGetDevice(&devID);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
gpuErrchk(hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns));
//hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
//dim3 dimGrid((numCColumns / Tile_size) + 1, (numCRows / Tile_size) + 1, 1);//Number of Blocks required
//dim3 dimBlock(Tile_size, Tile_size, 1);//Number of threads in each block
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
MatAddSharedElementwise <16> << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns, alpha, beta);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
gpuErrchk(hipFree(deviceB));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
MatAddSharedElementwise <32> << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns, alpha, beta);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
gpuErrchk(hipFree(deviceB));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
}
| 7d1b827a75805084e1945edc7db256efbfbc77a0.cu | /*
* ElementwiseAddition
*
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0], prhs[1] := mxGPUArray or CPU Array}
* gpuArray output, C=ELA_CUDA(A,B, alpha, beta) C=A*alpha+B*beta.
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "ElementwiseAddition.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, const int line)
{
if (code != cudaSuccess)
{
//fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), __FILE__, __LINE__);
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(code));
cudaDeviceReset();
mexErrMsgIdAndTxt( "MATLAB:mexatexit:fatal", "check the memory and process usage");
}
}
template <unsigned int TILE_DIM > __global__ void MatAddSharedElementwise(float * A, float * B, float * C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows, int numCColumns, float alpha, float beta) {
float CValue = 0.0;
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]*alpha;
//else As[threadIdx.y][threadIdx.x] = 10.0;
if (k*TILE_DIM + threadIdx.x < numBColumns && Row < numBRows) Bs[threadIdx.y][threadIdx.x] = B[Row*numBColumns + k*TILE_DIM + threadIdx.x]*beta;
//Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
//else Bs[threadIdx.y][threadIdx.x] = 50.0;
__syncthreads();
//for (int n = 0; n < TILE_DIM; ++n)
CValue = As[threadIdx.y][threadIdx.x] + Bs[threadIdx.y][threadIdx.x];
if (Row < numCRows && Col < numCColumns) C[Row*numBColumns + k*TILE_DIM + threadIdx.x] = CValue;
__syncthreads();
}
}
void ElementwiseAddition(float * A, float * B, float * C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows, int numCColumns, float alpha, float beta)
{
float * hostA = A; // The A matrix
float * hostB = B; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
float * deviceB;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
// test32
cudaError_t error;
int devID = 0;
// get number of SMs on this GPU
error = cudaGetDevice(&devID);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
gpuErrchk(cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns));
//cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
//dim3 dimGrid((numCColumns / Tile_size) + 1, (numCRows / Tile_size) + 1, 1);//Number of Blocks required
//dim3 dimBlock(Tile_size, Tile_size, 1);//Number of threads in each block
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
MatAddSharedElementwise <16> << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns, alpha, beta);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
gpuErrchk(cudaFree(deviceB));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
MatAddSharedElementwise <32> << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns, alpha, beta);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
gpuErrchk(cudaFree(deviceB));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
}
|
9a344a4806297e10933db239b6d79a593020907b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <time.h>
#include <fstream>
#include <vector>
#include <fstream>
#include <hiprand/hiprand_kernel.h>
#include <hipfft.h>
float L,LL; int N, C,itera;
using namespace std;
// función Maxwelliana de la distribución de las partículas.
__device__ float distribution (float vb, float aleatorio, hiprandState_t *states) //generador de distribución maxwelliana para la velocidad
{
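    // Rejection sampling from a double-peaked (two-stream) Maxwellian centred at +/- vb.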
// Genera un valor random v
float fmax = 0.5 * (1.0 + exp (-2.0 * vb * vb));
float vmin = - 5.0 * vb;
float vmax = + 5.0 * vb;
float v;
float f;
float x;
int Idx = blockIdx.x*blockDim.x + threadIdx.x;
while(true){
v = vmin + ((vmax - vmin) * aleatorio);
f = 0.5 * (exp (-(v - vb) * (v - vb) / 2.0) +
exp (-(v + vb) * (v + vb) / 2.0));
x = fmax * aleatorio;
if(x > f) aleatorio = hiprand_uniform(states + Idx);
else return v;
}
}
//Distribución aleatoria de las partículas.
__global__ void distribucionParticulas(float *rx,float *ry,float *vx,float *vy,int N,hiprandState_t *states,float vb,float L){
int Idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int seed = (unsigned int) (clock() * Idx);
hiprand_init(seed, 0, 0, states + Idx);
if(Idx < N){
rx[Idx] = L*hiprand_uniform(states + Idx); //inicializando la posicion aleatoria en x
ry[Idx] = L*hiprand_uniform(states + Idx);
vx[Idx] = distribution(vb,hiprand_uniform(states + Idx),states);//;L*curand_uniform_float(states + Idx);//distribution(vb,states); //inicializa la velocidad con una distribucion maxwelliana
vy[Idx] = distribution(vb,hiprand_uniform(states + Idx),states);//L*curand_uniform_float(states + Idx);//distribution(vb,states); //inicializa la velocidad con una distribucion maxwelliana
}
}
// inicialización de la densidad.
__global__ void inicializacionDensidad(float *ne,int C){
int Id=blockIdx.x*blockDim.x + threadIdx.x;
if(Id<(C*C)){
ne[Id]=0.0;
}
}
//Calculo de la densidad en cada celda.
__global__ void calculoDensidad(float *rx, float *ry, float *ne, int N, int C,float L){
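    // Cloud-in-cell style deposit along x: each particle splits its weight between cell jx and
    // cell jx+1 (wrapping to column 0 at the right edge) on the row given by jy.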
int Id=blockIdx.x*blockDim.x + threadIdx.x;
float dx = L / float (C);
float dxx=L/float(C*C);
if(Id<N){
int jx = int(rx[Id]/dx); //posicion en x de la particula
int jy = int(ry[Id]/dx); //posicion en y de la particula
float yx = (rx[Id]/dx) - (float)jx; //posicion exacta de la particula en x de la celda "j"
//float yy = (ry[Id]/dx) - (float)jy; //posicion exacta de la particula en y de la celda "j"
ne[(jy*C)+jx] += (1. - yx)/dxx;
if(jx+1==C) ne[(jy*C)] += yx/dxx;
else ne[(jy*C)+jx+1] += yx/dxx;
}
}
//pasar de reales a complejos.
__global__ void real2complex (float *ne, hipfftComplex *u, int C)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int index =idy*C+idx;
if ( idx < C && idy <C)
{
u[index].x = ne[index];
u[index].y = 0.0f;
}
}
//__global__ void prueba (hipfftComplex *vf, float *vr, int C){
// int idx = blockIdx.x*blockDim.x+threadIdx.x;
// int idy = blockIdx.y*blockDim.y+threadIdx.y;
// int index =idy*C+idx;
//
// if(idx<C && idy<C){
//
// vr[index]= (vf[index].x)/((float)C*(float)C*(float)C*(float)C);
// vr[index]= (vf[index].y)/((float)C*(float)C*(float)C*(float)C);
//
// }
//}
__global__ void solve_Poisson(hipfftComplex *vf, hipfftComplex *v, int C,float L){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
float dx = L / float (C);
float i,W,Wm,Wn;
i = (0.0,L);
W = exp(2.0 * M_PI * i / float(C));
Wm = L;
Wn = L;
if(idx<C && idy<C){
int index = idy*C+idx;
float denom;
denom = 4.0;
denom -= (Wm + (L / Wm) + Wn +( L / Wn));
if (denom != 0.0){
vf[index].x *= dx*dx/denom;
vf[index].y *= dx*dx/denom;
}
Wn *= W;//se multiplica por la constante W
}
Wm *= W;
if(idx<C && idy<C){
int index = idx*C+idy;
v[index].x=vf[index].x;
v[index].y=vf[index].y;
}
}
__global__ void complex2real(hipfftComplex *v, float *vr, int C){
/* compute idx and idy, the location of the element in the original CxC array */
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
if ( idx < C && idy <C)
{
int index = idy*C+idx;
vr[index] = v[index].x /((float)C*(float)C);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
int main(){
// Parametros
L = 64.0; // dominio de la solucion 0 <= x <= L (en longitudes de debye)
//L=LL*LL;
N = 10000; // Numero de particulas
C = 64; // Número de celdas.
float vb = 3.0; // velocidad promedio de los electrones
//float kappa = 2. * M_PI / (L);
//float dt=0.1; // delta tiempo (en frecuencias inversas del plasma)
//float tmax=10000; // cantidad de iteraciones. deben ser 100 mil segun el material
//int skip = int (tmax / dt) / 10; //saltos del algoritmo para reportar datos
//int itera=0;
float salida=0.;
//float dx = L / float (C);
/////////////////////////////////////////////////////////////////////////////////////////////////////
//Inicializacion de la posición de las particulas en x, y y velocidad en vx,vy del host y dispositivo.
float *rx_h,*ry_h,*vx_h,*vy_h;
float *rx_d,*ry_d, *vx_d,*vy_d;
////////////////////////////////////////////////////////////////////////////////////////////////////
// inicialización de las variables de densidad del host y dispositivo.
float *ne_h;
float *ne_d;
float *vr_h;
float *vr_d;
////////////////////////////////////////////////////////////////////////////////////////////////////
//inicializacion tipo complex a real.
hipfftComplex *u_complex_d,*vf_complex_d,*v_complex_d ;
hipMalloc((void**)&u_complex_d,sizeof(hipfftComplex)*C*C);
hipMalloc((void**)&vf_complex_d,sizeof(hipfftComplex)*C*C);
hipMalloc((void**)&v_complex_d,sizeof(hipfftComplex)*C*C);
////////////////////////////////////////////////////////////////////////////////////////////////////
int size = N*sizeof(float);
int size_ne=C*C*sizeof(float);
//////////////////////////////////////////////////////////////////////////////////////////////////////
//reserva en memoria al host
rx_h = (float *)malloc(size);
ry_h = (float *)malloc(size);
vx_h = (float *)malloc(size);
vy_h = (float *)malloc(size);
ne_h = (float *)malloc(size_ne);
vr_h = (float *)malloc(size_ne);
//////////////////////////////////////////////////////////////////////////////////////////////////////
//reserva de memoria del dispositivo.
hipMalloc((void **)&rx_d,size);
hipMalloc((void **)&ry_d,size);
hipMalloc((void **)&vx_d,size);
hipMalloc((void **)&vy_d,size);
hipMalloc((void **)&ne_d,size_ne);
hipMalloc((void **)&vr_d,size_ne);
////////////////////////////////////////////////////////////////////////////////////////////////////
//valores aleatorios y tamaños de los vectores.
hiprandState_t *devStates;
hipMalloc((void **) &devStates, N * sizeof(hiprandState_t));
float blockSize = 1024;
dim3 dimBlock (ceil(N/blockSize), 1, 1);
dim3 dimBlock2 (ceil((C*C)/blockSize), 1, 1);
dim3 dimGrid (blockSize, 1, 1);
hipLaunchKernelGGL(( distribucionParticulas), dim3(blockSize),dim3(dimBlock), 0, 0, rx_d,ry_d,vx_d,vy_d,N,devStates,vb,L);
hipDeviceSynchronize();
hipLaunchKernelGGL(( inicializacionDensidad), dim3(blockSize),dim3(dimBlock2), 0, 0, ne_d,C);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calculoDensidad), dim3(blockSize),dim3(dimBlock), 0, 0, rx_d,ry_d,ne_d,N,C,L);
hipDeviceSynchronize();
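    // Spectral Poisson solve: forward FFT of the deposited density, per-mode scaling in
    // solve_Poisson, then an inverse FFT; complex2real divides by C*C to undo the FFT scaling.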
hipfftHandle plan;
hipfftPlan2d(&plan, C, C, HIPFFT_C2C);
hipLaunchKernelGGL(( real2complex), dim3(blockSize),dim3(dimBlock2), 0, 0, ne_d,u_complex_d,C);
hipDeviceSynchronize();
hipfftExecC2C (plan, u_complex_d, vf_complex_d, HIPFFT_FORWARD);
// dividir el resultado por C4
//prueba<<<dimGrid, dimBlock2>>> (vf_complex_d,vr_d,C);
v_complex_d[0].x=0.0;
v_complex_d[0].y=0.0;
hipLaunchKernelGGL(( solve_Poisson), dim3(dimGrid), dim3(dimBlock2), 0, 0, vf_complex_d,v_complex_d,C,L);
hipDeviceSynchronize();
hipfftExecC2C (plan, v_complex_d, v_complex_d, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( complex2real), dim3(dimGrid), dim3(dimBlock2), 0, 0, v_complex_d,vr_d,C);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//posición en x.
hipMemcpy(rx_h, rx_d, size, hipMemcpyDeviceToHost);
// posición en y.
hipMemcpy(ry_h, ry_d, size, hipMemcpyDeviceToHost);
// velocidad en x.
hipMemcpy(vx_h, vx_d, size, hipMemcpyDeviceToHost);
//velocidad en y.
hipMemcpy(vy_h, vy_d, size, hipMemcpyDeviceToHost);
//inicializacion densidades
hipMemcpy(ne_h, ne_d, size_ne, hipMemcpyDeviceToHost);
//calculo poisson
hipMemcpy (vr_h , vr_d, size_ne, hipMemcpyDeviceToHost);
///////////////////Imprimir los resultados en archivos//////////////////////
ofstream init;
init.open("distribucionInicial.txt");
for (int i = 0; i < N; i++){
init<<rx_h[i]<<" "<<ry_h[i]<<" "<<vx_h[i]<<" "<<vy_h[i]<<endl;
}
init.close();
init.open("salida_densidad3.txt");
for (int i = 0; i < C*C; i++){
init<<ne_h[i]<<endl;
salida+=ne_h[i];
}
init.close();
cout<<salida<<" "<<endl;
init.open("entrada_poisson");
for (int i = 0; i < C; i++){
for (int j = 0; j < C; j++){
init<<ne_h[(C*i)+j]<<" ";
}
init<<endl;
}
init.close();
init.open("poisson");
for (int i = 0; i < C; i++){
for (int j = 0; j < C; j++){
init<< vr_h[(C*j)+i]<<" ";
}
init<<endl;
}
init.close();
////////////////////Liberar memoria//////////////////////////
free(rx_h);
free(ry_h);
free(vx_h);
free(vy_h);
free(ne_h);
free(vr_h);
hipfftDestroy(plan);
hipFree(rx_d);
hipFree(ry_d);
hipFree(vx_d);
hipFree(vy_d);
hipFree(ne_d);
hipFree(vr_d);
hipFree(u_complex_d);
hipFree(vf_complex_d);
hipFree(v_complex_d);
return (0);
}
| 9a344a4806297e10933db239b6d79a593020907b.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <time.h>
#include <fstream>
#include <vector>
#include <fstream>
#include <curand_kernel.h>
#include <cufft.h>
float L,LL; int N, C,itera;
using namespace std;
// función Maxwelliana de la distribución de las partículas.
__device__ float distribution (float vb, float aleatorio, curandState *states) //generador de distribución maxwelliana para la velocidad
{
// Genera un valor random v
float fmax = 0.5 * (1.0 + exp (-2.0 * vb * vb));
float vmin = - 5.0 * vb;
float vmax = + 5.0 * vb;
float v;
float f;
float x;
int Idx = blockIdx.x*blockDim.x + threadIdx.x;
while(true){
v = vmin + ((vmax - vmin) * aleatorio);
f = 0.5 * (exp (-(v - vb) * (v - vb) / 2.0) +
exp (-(v + vb) * (v + vb) / 2.0));
x = fmax * aleatorio;
if(x > f) aleatorio = curand_uniform(states + Idx);
else return v;
}
}
//Distribución aleatoria de las partículas.
__global__ void distribucionParticulas(float *rx,float *ry,float *vx,float *vy,int N,curandState *states,float vb,float L){
int Idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int seed = (unsigned int) (clock() * Idx);
curand_init(seed, 0, 0, states + Idx);
if(Idx < N){
rx[Idx] = L*curand_uniform(states + Idx); //inicializando la posicion aleatoria en x
ry[Idx] = L*curand_uniform(states + Idx);
vx[Idx] = distribution(vb,curand_uniform(states + Idx),states);//;L*curand_uniform_float(states + Idx);//distribution(vb,states); //inicializa la velocidad con una distribucion maxwelliana
vy[Idx] = distribution(vb,curand_uniform(states + Idx),states);//L*curand_uniform_float(states + Idx);//distribution(vb,states); //inicializa la velocidad con una distribucion maxwelliana
}
}
// inicialización de la densidad.
__global__ void inicializacionDensidad(float *ne,int C){
int Id=blockIdx.x*blockDim.x + threadIdx.x;
if(Id<(C*C)){
ne[Id]=0.0;
}
}
//Calculo de la densidad en cada celda.
__global__ void calculoDensidad(float *rx, float *ry, float *ne, int N, int C,float L){
int Id=blockIdx.x*blockDim.x + threadIdx.x;
float dx = L / float (C);
float dxx=L/float(C*C);
if(Id<N){
int jx = int(rx[Id]/dx); //posicion en x de la particula
int jy = int(ry[Id]/dx); //posicion en y de la particula
float yx = (rx[Id]/dx) - (float)jx; //posicion exacta de la particula en x de la celda "j"
//float yy = (ry[Id]/dx) - (float)jy; //posicion exacta de la particula en y de la celda "j"
ne[(jy*C)+jx] += (1. - yx)/dxx;
if(jx+1==C) ne[(jy*C)] += yx/dxx;
else ne[(jy*C)+jx+1] += yx/dxx;
}
}
//pasar de reales a complejos.
__global__ void real2complex (float *ne, cufftComplex *u, int C)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int index =idy*C+idx;
if ( idx < C && idy <C)
{
u[index].x = ne[index];
u[index].y = 0.0f;
}
}
//__global__ void prueba (cufftComplex *vf, float *vr, int C){
// int idx = blockIdx.x*blockDim.x+threadIdx.x;
// int idy = blockIdx.y*blockDim.y+threadIdx.y;
// int index =idy*C+idx;
//
// if(idx<C && idy<C){
//
// vr[index]= (vf[index].x)/((float)C*(float)C*(float)C*(float)C);
// vr[index]= (vf[index].y)/((float)C*(float)C*(float)C*(float)C);
//
// }
//}
__global__ void solve_Poisson(cufftComplex *vf, cufftComplex *v, int C,float L){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
float dx = L / float (C);
float i,W,Wm,Wn;
i = (0.0,L);
W = exp(2.0 * M_PI * i / float(C));
Wm = L;
Wn = L;
if(idx<C && idy<C){
int index = idy*C+idx;
float denom;
denom = 4.0;
denom -= (Wm + (L / Wm) + Wn +( L / Wn));
if (denom != 0.0){
vf[index].x *= dx*dx/denom;
vf[index].y *= dx*dx/denom;
}
Wn *= W;//se multiplica por la constante W
}
Wm *= W;
if(idx<C && idy<C){
int index = idx*C+idy;
v[index].x=vf[index].x;
v[index].y=vf[index].y;
}
}
__global__ void complex2real(cufftComplex *v, float *vr, int C){
/* compute idx and idy, the location of the element in the original CxC array */
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
if ( idx < C && idy <C)
{
int index = idy*C+idx;
vr[index] = v[index].x /((float)C*(float)C);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
int main(){
// Parametros
L = 64.0; // dominio de la solucion 0 <= x <= L (en longitudes de debye)
//L=LL*LL;
N = 10000; // Numero de particulas
C = 64; // Número de celdas.
float vb = 3.0; // velocidad promedio de los electrones
//float kappa = 2. * M_PI / (L);
//float dt=0.1; // delta tiempo (en frecuencias inversas del plasma)
//float tmax=10000; // cantidad de iteraciones. deben ser 100 mil segun el material
//int skip = int (tmax / dt) / 10; //saltos del algoritmo para reportar datos
//int itera=0;
float salida=0.;
//float dx = L / float (C);
/////////////////////////////////////////////////////////////////////////////////////////////////////
//Inicializacion de la posición de las particulas en x, y y velocidad en vx,vy del host y dispositivo.
float *rx_h,*ry_h,*vx_h,*vy_h;
float *rx_d,*ry_d, *vx_d,*vy_d;
////////////////////////////////////////////////////////////////////////////////////////////////////
// inicialización de las variables de densidad del host y dispositivo.
float *ne_h;
float *ne_d;
float *vr_h;
float *vr_d;
////////////////////////////////////////////////////////////////////////////////////////////////////
//inicializacion tipo complex a real.
cufftComplex *u_complex_d,*vf_complex_d,*v_complex_d ;
cudaMalloc((void**)&u_complex_d,sizeof(cufftComplex)*C*C);
cudaMalloc((void**)&vf_complex_d,sizeof(cufftComplex)*C*C);
cudaMalloc((void**)&v_complex_d,sizeof(cufftComplex)*C*C);
////////////////////////////////////////////////////////////////////////////////////////////////////
int size = N*sizeof(float);
int size_ne=C*C*sizeof(float);
//////////////////////////////////////////////////////////////////////////////////////////////////////
//reserva en memoria al host
rx_h = (float *)malloc(size);
ry_h = (float *)malloc(size);
vx_h = (float *)malloc(size);
vy_h = (float *)malloc(size);
ne_h = (float *)malloc(size_ne);
vr_h = (float *)malloc(size_ne);
//////////////////////////////////////////////////////////////////////////////////////////////////////
//reserva de memoria del dispositivo.
cudaMalloc((void **)&rx_d,size);
cudaMalloc((void **)&ry_d,size);
cudaMalloc((void **)&vx_d,size);
cudaMalloc((void **)&vy_d,size);
cudaMalloc((void **)&ne_d,size_ne);
cudaMalloc((void **)&vr_d,size_ne);
////////////////////////////////////////////////////////////////////////////////////////////////////
//valores aleatorios y tamaños de los vectores.
curandState *devStates;
cudaMalloc((void **) &devStates, N * sizeof(curandState));
float blockSize = 1024;
dim3 dimBlock (ceil(N/blockSize), 1, 1);
dim3 dimBlock2 (ceil((C*C)/blockSize), 1, 1);
dim3 dimGrid (blockSize, 1, 1);
distribucionParticulas<<<blockSize,dimBlock>>>(rx_d,ry_d,vx_d,vy_d,N,devStates,vb,L);
cudaDeviceSynchronize();
inicializacionDensidad<<<blockSize,dimBlock2>>>(ne_d,C);
cudaDeviceSynchronize();
calculoDensidad<<<blockSize,dimBlock>>>(rx_d,ry_d,ne_d,N,C,L);
cudaDeviceSynchronize();
cufftHandle plan;
cufftPlan2d(&plan, C, C, CUFFT_C2C);
real2complex<<<blockSize,dimBlock2>>>(ne_d,u_complex_d,C);
cudaDeviceSynchronize();
cufftExecC2C (plan, u_complex_d, vf_complex_d, CUFFT_FORWARD);
// dividir el resultado por C4
//prueba<<<dimGrid, dimBlock2>>> (vf_complex_d,vr_d,C);
v_complex_d[0].x=0.0;
v_complex_d[0].y=0.0;
solve_Poisson<<<dimGrid, dimBlock2>>> (vf_complex_d,v_complex_d,C,L);
cudaDeviceSynchronize();
cufftExecC2C (plan, v_complex_d, v_complex_d, CUFFT_INVERSE);
complex2real<<<dimGrid, dimBlock2>>> (v_complex_d,vr_d,C);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//posición en x.
cudaMemcpy(rx_h, rx_d, size, cudaMemcpyDeviceToHost);
// posición en y.
cudaMemcpy(ry_h, ry_d, size, cudaMemcpyDeviceToHost);
// velocidad en x.
cudaMemcpy(vx_h, vx_d, size, cudaMemcpyDeviceToHost);
//velocidad en y.
cudaMemcpy(vy_h, vy_d, size, cudaMemcpyDeviceToHost);
//inicializacion densidades
cudaMemcpy(ne_h, ne_d, size_ne, cudaMemcpyDeviceToHost);
//calculo poisson
cudaMemcpy (vr_h , vr_d, size_ne, cudaMemcpyDeviceToHost);
///////////////////Imprimir los resultados en archivos//////////////////////
ofstream init;
init.open("distribucionInicial.txt");
for (int i = 0; i < N; i++){
init<<rx_h[i]<<" "<<ry_h[i]<<" "<<vx_h[i]<<" "<<vy_h[i]<<endl;
}
init.close();
init.open("salida_densidad3.txt");
for (int i = 0; i < C*C; i++){
init<<ne_h[i]<<endl;
salida+=ne_h[i];
}
init.close();
cout<<salida<<" "<<endl;
init.open("entrada_poisson");
for (int i = 0; i < C; i++){
for (int j = 0; j < C; j++){
init<<ne_h[(C*i)+j]<<" ";
}
init<<endl;
}
init.close();
init.open("poisson");
for (int i = 0; i < C; i++){
for (int j = 0; j < C; j++){
init<< vr_h[(C*j)+i]<<" ";
}
init<<endl;
}
init.close();
////////////////////Liberar memoria//////////////////////////
free(rx_h);
free(ry_h);
free(vx_h);
free(vy_h);
free(ne_h);
free(vr_h);
cufftDestroy(plan);
cudaFree(rx_d);
cudaFree(ry_d);
cudaFree(vx_d);
cudaFree(vy_d);
cudaFree(ne_d);
cudaFree(vr_d);
cudaFree(u_complex_d);
cudaFree(vf_complex_d);
cudaFree(v_complex_d);
return (0);
}
|
f2b17eea38c015e29c7fd21725eef9b0569b54cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cfloat>
#include <hiprand/hiprand_kernel.h>
#include "includes/common.h"
#include "includes/hittable_list.h"
#include "includes/sphere.h"
#include "includes/camera.h"
#include "includes/material.h"
using namespace std;
const size_t IMAGE_WIDTH = 1200;
const size_t IMAGE_HEIGHT = 800;
const bool OUTPUT = true;
//__constant__ hittable_list* d_const_world;
// val -> the return value of CUDA calls
#define checkCudaError(val) checkError( (val), #val)
void checkError(hipError_t result, const char* func)
{
if (result != hipSuccess)
{
cerr << "CUDA error: " << hipGetErrorString(result) << " at " << __FILE__
<< ", line " << __LINE__ << ", func = " << func << endl;
// reset CUDA device before exiting
hipDeviceReset();
exit(EXIT_FAILURE);
}
}
__device__ color rayColor(const ray& r, hittable** d_world, hiprandState_t* state, int maxDepth)
{
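    // Iterative path tracing: device code avoids recursion, so up to maxDepth bounces are
    // unrolled into a loop that accumulates attenuation; a miss returns the sky gradient scaled
    // by that attenuation, and exhausting the depth budget (or an absorbed ray) returns black.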
ray currentRay = r;
color currentAttenuation = color(1.0f, 1.0f, 1.0f);
hittable* objects = *d_world;
for (int i = 0; i < maxDepth; i++)
{
hitRecord rec;
if (objects->hit(currentRay, 0.001f, FLT_MAX, rec))
{
ray scattered;
color attenuation;
if (rec.mat_ptr->scatter(currentRay, rec, attenuation, scattered, state))
{
currentRay = scattered;
currentAttenuation *= attenuation;
}
else
return color(0.0f, 0.0f, 0.0f);
}
else
{
vec3 unitDir = unit_vector(currentRay.direction());
float t = 0.5f * (unitDir.y() + 1.0f);
color clr = (1.0f - t) * color(1.0f, 1.0f, 1.0f) + t * color(0.5, 0.7, 1.0);
return currentAttenuation * clr;
}
}
return color(0.0f, 0.0f, 0.0f);
}
__global__ void render(color* framebuffer, int width, int height, int samplesPerPixel, hiprandState_t* state,
hittable** d_world, camera** d_camera, int maxDepth)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= width || j >= height) return;
int index = j * width + i;
color pixelColor(0.0f, 0.0f, 0.0f);
hiprandState_t local_state = state[index];
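    // Work on a register-local copy of the RNG state and write it back after the sampling loop
    // so the per-pixel random sequence stays advanced.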
// construct the ray
for (int s = 0; s < samplesPerPixel; s++)
{
float u = float((i + random_float(&local_state))) / float(width - 1);
float v = float((j + random_float(&local_state))) / float(height - 1);
ray r = (*d_camera)->get_ray(u, v, &local_state);
pixelColor += rayColor(r, d_world, &local_state, maxDepth);
}
state[index] = local_state;
pixelColor /= samplesPerPixel;
// gamma correction = 2.0f
pixelColor[0] = sqrtf(pixelColor[0]);
pixelColor[1] = sqrtf(pixelColor[1]);
pixelColor[2] = sqrtf(pixelColor[2]);
framebuffer[index] = pixelColor;
}
__global__ void setupRender(hiprandState_t* state, int width, int height)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= width || j >= height) return;
int index = j * width + i;
// Each thread gets same seed, a different sequence number, no offset
hiprand_init(1998, index, 0, &state[index]);
}
__global__ void randInit(hiprandState_t* state)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
hiprand_init(1998, 0, 0, state);
}
__global__ void randomScene(hittable** d_world, hittable** d_objects, camera** d_camera, hiprandState_t* state)
{
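    // The spheres, materials and camera are built on the device: they are polymorphic objects,
    // so constructing them in device code keeps their vtable pointers valid for device-side calls.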
material* materialGround = new lambertian(color(0.5f, 0.5f, 0.5f));
d_objects[0] = new sphere(point3(0, -1000, 0), 1000, materialGround);
int index = 1;
// random small spheres
for (int a = -11; a < 11; a++)
{
for (int b = -11; b < 11; b++, index++)
{
float material = random_float(state);
point3 center(a + 0.9f * random_float(state), 0.2f, b + 0.9f * random_float(state));
color albedo;
if (material < 0.8f)
{
// diffuse
albedo = color::random(state) * color::random(state);
d_objects[index] = new sphere(center, 0.2f, new lambertian(albedo));
}
else if (material < 0.95f)
{
// metal
albedo = color::random(state, 0.5f, 1.0f);
float fuzz = random_float(state, 0.5f, 1.0f);
d_objects[index] = new sphere(center, 0.2f, new metal(albedo, fuzz));
}
else
{
// glass
d_objects[index] = new sphere(center, 0.2f, new dielectric(1.5f));
}
}
}
// 3 big spheres
d_objects[index] = new sphere(point3(0, 1, 0), 1.0, new dielectric(1.5));
d_objects[++index] = new sphere(point3(-4, 1, 0), 1.0, new lambertian(color(0.4, 0.2, 0.1)));
d_objects[++index] = new sphere(point3(4, 1, 0), 1.0, new metal(color(0.7, 0.6, 0.5), 0.0));
*d_world = new hittable_list(d_objects, 488);
// camera
point3 lookfrom(13, 2, 3);
point3 lookat(0, 0, 0);
vec3 vup(0, 1, 0);
float dist_to_focus = 10.0;
float aperture = 0.1;
*d_camera = new camera(lookfrom, lookat, vup, 35, 3.0f / 2.0f, aperture, dist_to_focus);
}
__global__ void createWorld(hittable** d_world, hittable** d_objects, camera** d_camera, int numObjects)
{
// execute only once
if (threadIdx.x == 0 && blockIdx.x == 0)
{
point3 lookfrom(0, 3, 3);
point3 lookat(0, 0, -1);
vec3 vup(0, 0, -1);
float dist_to_focus = (lookfrom - lookat).length();
float aperture = 2.0;
material* material_ground = new lambertian(color(0.8, 0.8, 0.0));
material* material_center = new lambertian(color(0.1, 0.2, 0.5));
material* material_left = new dielectric(1.5);
material* material_right = new metal(color(0.8, 0.6, 0.2), 1.0);
*d_objects = new sphere(point3(0.0, -100.5, -1.0), 100.0, material_ground);
*(d_objects + 1) = new sphere(point3(0.0, 0.0, -1.0), 0.5, material_center);
*(d_objects + 2) = new sphere(point3(-1.0, 0.0, -1.0), 0.5, material_left);
*(d_objects + 3) = new sphere(point3(1.0, 0.0, -1.0), 0.5, material_right);
*(d_objects + 4) = new sphere(point3(-1.0, 0.0, -1.0), -0.45, new dielectric(1.5));
*d_world = new hittable_list(d_objects, numObjects);
*d_camera = new camera(lookfrom, lookat, vup, 30, 4.0f/3.0f, aperture, dist_to_focus);
}
}
__global__ void deleteWorld(hittable** d_world, hittable** d_objects, camera** d_camera, int numObjects)
{
// execute only once
if (threadIdx.x == 0 && blockIdx.x == 0)
{
for (int i = 0; i < numObjects; i++)
{
delete ((sphere*)d_objects[i])->mat_ptr;
delete d_objects[i];
}
delete* d_world;
delete* d_camera;
}
}
int main(int argc, char* args[])
{
size_t num_pixels = IMAGE_WIDTH * IMAGE_HEIGHT;
size_t framebufferSize = num_pixels * sizeof(vec3);
int samplesPerPixel = 200;
int maxDepth = 50;
int numObjects = 488;
// allocate framebuffer to unified memory
color* framebuffer;
checkCudaError(hipMallocManaged((void**)&framebuffer, framebufferSize));
// allocate camera
camera** d_camera;
checkCudaError(hipMalloc((void**)&d_camera, sizeof(camera*)));
// allocate random states
hiprandState_t* d_state;
checkCudaError(hipMalloc((void**)&d_state, num_pixels * sizeof(hiprandState_t)));
hiprandState_t* d_state2;
checkCudaError(hipMalloc((void**)&d_state2, sizeof(hiprandState_t)));
// allocate world and objects
hittable** d_world;
size_t d_worldSize = sizeof(hittable*);
checkCudaError(hipMalloc((void**)&d_world, d_worldSize));
hittable** d_objects;
size_t d_objectsSize = numObjects * sizeof(hittable*);
cerr << "d_objectsSize = " << d_objectsSize << endl;
cerr << "d_worldSize = " << d_worldSize << endl;
cerr << "size of d_state = " << num_pixels * sizeof(hiprandState_t) << endl;
checkCudaError(hipMalloc((void**)&d_objects, d_objectsSize));
// setup random state for randomScene
hipLaunchKernelGGL(( randInit), dim3(1), dim3(1), 0, 0, d_state2);
checkCudaError(hipGetLastError());
checkCudaError(hipDeviceSynchronize());
// setup randomScene
hipLaunchKernelGGL(( randomScene), dim3(1), dim3(1), 0, 0, d_world, d_objects, d_camera, d_state2);
checkCudaError(hipGetLastError());
checkCudaError(hipDeviceSynchronize());
//checkCudaError(hipMemcpyToSymbol("d_const_world", *d_world, sizeof(hittable_list), 0, hipMemcpyDeviceToDevice));
// configure parameters
// 30 * 20 is the best
// 32 * 32 the worst
int blockDimX = 30, blockDimY = 20;
dim3 dimBlock(blockDimX, blockDimY);
dim3 dimGrid((IMAGE_WIDTH + blockDimX - 1) / blockDimX, (IMAGE_HEIGHT + blockDimY - 1) / blockDimY);
// setup random state for render
hipLaunchKernelGGL(( setupRender), dim3(dimGrid), dim3(dimBlock), 0, 0, d_state, IMAGE_WIDTH, IMAGE_HEIGHT);
checkCudaError(hipGetLastError());
checkCudaError(hipDeviceSynchronize());
// invoke render
hipLaunchKernelGGL(( render), dim3(dimGrid), dim3(dimBlock) , 0, 0, framebuffer, IMAGE_WIDTH, IMAGE_HEIGHT, samplesPerPixel, d_state, d_world, d_camera, maxDepth);
checkCudaError(hipGetLastError());
checkCudaError(hipDeviceSynchronize());
// using command line param to output rendered PPM image
if (OUTPUT)
{
std::cout << "P3\n" << IMAGE_WIDTH << " " << IMAGE_HEIGHT << "\n255\n";
for (int j = IMAGE_HEIGHT - 1; j >= 0; j--)
{
for (int i = 0; i < IMAGE_WIDTH; i++)
{
int index = j * IMAGE_WIDTH + i;
int ir = int(255.99 * framebuffer[index].x());
int ig = int(255.99 * framebuffer[index].y());
int ib = int(255.99 * framebuffer[index].z());
std::cout << ir << " " << ig << " " << ib << endl;
}
}
}
// clean up
hipLaunchKernelGGL(( deleteWorld), dim3(1), dim3(1), 0, 0, d_world, d_objects, d_camera, numObjects);
checkCudaError(hipGetLastError());
checkCudaError(hipDeviceSynchronize());
checkCudaError(hipFree(d_world));
checkCudaError(hipFree(d_objects));
checkCudaError(hipFree(d_camera));
checkCudaError(hipFree(d_state));
checkCudaError(hipFree(framebuffer));
hipDeviceReset();
return 0;
} | f2b17eea38c015e29c7fd21725eef9b0569b54cd.cu | #include <iostream>
#include <cstdlib>
#include <cfloat>
#include <curand_kernel.h>
#include "includes/common.h"
#include "includes/hittable_list.h"
#include "includes/sphere.h"
#include "includes/camera.h"
#include "includes/material.h"
using namespace std;
const size_t IMAGE_WIDTH = 1200;
const size_t IMAGE_HEIGHT = 800;
const bool OUTPUT = true;
//__constant__ hittable_list* d_const_world;
// val -> the return value of CUDA calls
#define checkCudaError(val) checkError( (val), #val)
void checkError(cudaError_t result, const char* func)
{
if (result != cudaSuccess)
{
cerr << "CUDA error: " << cudaGetErrorString(result) << " at " << __FILE__
<< ", line " << __LINE__ << ", func = " << func << endl;
// reset CUDA device before exiting
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
__device__ color rayColor(const ray& r, hittable** d_world, curandState* state, int maxDepth)
{
ray currentRay = r;
color currentAttenuation = color(1.0f, 1.0f, 1.0f);
hittable* objects = *d_world;
for (int i = 0; i < maxDepth; i++)
{
hitRecord rec;
if (objects->hit(currentRay, 0.001f, FLT_MAX, rec))
{
ray scattered;
color attenuation;
if (rec.mat_ptr->scatter(currentRay, rec, attenuation, scattered, state))
{
currentRay = scattered;
currentAttenuation *= attenuation;
}
else
return color(0.0f, 0.0f, 0.0f);
}
else
{
vec3 unitDir = unit_vector(currentRay.direction());
float t = 0.5f * (unitDir.y() + 1.0f);
color clr = (1.0f - t) * color(1.0f, 1.0f, 1.0f) + t * color(0.5, 0.7, 1.0);
return currentAttenuation * clr;
}
}
return color(0.0f, 0.0f, 0.0f);
}
__global__ void render(color* framebuffer, int width, int height, int samplesPerPixel, curandState* state,
hittable** d_world, camera** d_camera, int maxDepth)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= width || j >= height) return;
int index = j * width + i;
color pixelColor(0.0f, 0.0f, 0.0f);
curandState local_state = state[index];
// construct the ray
for (int s = 0; s < samplesPerPixel; s++)
{
float u = float((i + random_float(&local_state))) / float(width - 1);
float v = float((j + random_float(&local_state))) / float(height - 1);
ray r = (*d_camera)->get_ray(u, v, &local_state);
pixelColor += rayColor(r, d_world, &local_state, maxDepth);
}
state[index] = local_state;
pixelColor /= samplesPerPixel;
// gamma correction = 2.0f
pixelColor[0] = sqrtf(pixelColor[0]);
pixelColor[1] = sqrtf(pixelColor[1]);
pixelColor[2] = sqrtf(pixelColor[2]);
framebuffer[index] = pixelColor;
}
__global__ void setupRender(curandState* state, int width, int height)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= width || j >= height) return;
int index = j * width + i;
// Each thread gets same seed, a different sequence number, no offset
curand_init(1998, index, 0, &state[index]);
}
__global__ void randInit(curandState* state)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
curand_init(1998, 0, 0, state);
}
__global__ void randomScene(hittable** d_world, hittable** d_objects, camera** d_camera, curandState* state)
{
material* materialGround = new lambertian(color(0.5f, 0.5f, 0.5f));
d_objects[0] = new sphere(point3(0, -1000, 0), 1000, materialGround);
int index = 1;
// random small spheres
for (int a = -11; a < 11; a++)
{
for (int b = -11; b < 11; b++, index++)
{
float material = random_float(state);
point3 center(a + 0.9f * random_float(state), 0.2f, b + 0.9f * random_float(state));
color albedo;
if (material < 0.8f)
{
// diffuse
albedo = color::random(state) * color::random(state);
d_objects[index] = new sphere(center, 0.2f, new lambertian(albedo));
}
else if (material < 0.95f)
{
// metal
albedo = color::random(state, 0.5f, 1.0f);
float fuzz = random_float(state, 0.5f, 1.0f);
d_objects[index] = new sphere(center, 0.2f, new metal(albedo, fuzz));
}
else
{
// glass
d_objects[index] = new sphere(center, 0.2f, new dielectric(1.5f));
}
}
}
// 3 big spheres
d_objects[index] = new sphere(point3(0, 1, 0), 1.0, new dielectric(1.5));
d_objects[++index] = new sphere(point3(-4, 1, 0), 1.0, new lambertian(color(0.4, 0.2, 0.1)));
d_objects[++index] = new sphere(point3(4, 1, 0), 1.0, new metal(color(0.7, 0.6, 0.5), 0.0));
*d_world = new hittable_list(d_objects, 488);
// camera
point3 lookfrom(13, 2, 3);
point3 lookat(0, 0, 0);
vec3 vup(0, 1, 0);
float dist_to_focus = 10.0;
float aperture = 0.1;
*d_camera = new camera(lookfrom, lookat, vup, 35, 3.0f / 2.0f, aperture, dist_to_focus);
}
__global__ void createWorld(hittable** d_world, hittable** d_objects, camera** d_camera, int numObjects)
{
// execute only once
if (threadIdx.x == 0 && blockIdx.x == 0)
{
point3 lookfrom(0, 3, 3);
point3 lookat(0, 0, -1);
vec3 vup(0, 0, -1);
float dist_to_focus = (lookfrom - lookat).length();
float aperture = 2.0;
material* material_ground = new lambertian(color(0.8, 0.8, 0.0));
material* material_center = new lambertian(color(0.1, 0.2, 0.5));
material* material_left = new dielectric(1.5);
material* material_right = new metal(color(0.8, 0.6, 0.2), 1.0);
*d_objects = new sphere(point3(0.0, -100.5, -1.0), 100.0, material_ground);
*(d_objects + 1) = new sphere(point3(0.0, 0.0, -1.0), 0.5, material_center);
*(d_objects + 2) = new sphere(point3(-1.0, 0.0, -1.0), 0.5, material_left);
*(d_objects + 3) = new sphere(point3(1.0, 0.0, -1.0), 0.5, material_right);
*(d_objects + 4) = new sphere(point3(-1.0, 0.0, -1.0), -0.45, new dielectric(1.5));
*d_world = new hittable_list(d_objects, numObjects);
*d_camera = new camera(lookfrom, lookat, vup, 30, 4.0f/3.0f, aperture, dist_to_focus);
}
}
__global__ void deleteWorld(hittable** d_world, hittable** d_objects, camera** d_camera, int numObjects)
{
// execute only once
if (threadIdx.x == 0 && blockIdx.x == 0)
{
for (int i = 0; i < numObjects; i++)
{
delete ((sphere*)d_objects[i])->mat_ptr;
delete d_objects[i];
}
delete* d_world;
delete* d_camera;
}
}
int main(int argc, char* args[])
{
size_t num_pixels = IMAGE_WIDTH * IMAGE_HEIGHT;
size_t framebufferSize = num_pixels * sizeof(vec3);
int samplesPerPixel = 200;
int maxDepth = 50;
int numObjects = 488;
// allocate framebuffer to unified memory
color* framebuffer;
checkCudaError(cudaMallocManaged((void**)&framebuffer, framebufferSize));
// allocate camera
camera** d_camera;
checkCudaError(cudaMalloc((void**)&d_camera, sizeof(camera*)));
// allocate random states
curandState* d_state;
checkCudaError(cudaMalloc((void**)&d_state, num_pixels * sizeof(curandState)));
curandState* d_state2;
checkCudaError(cudaMalloc((void**)&d_state2, sizeof(curandState)));
// allocate world and objects
hittable** d_world;
size_t d_worldSize = sizeof(hittable*);
checkCudaError(cudaMalloc((void**)&d_world, d_worldSize));
hittable** d_objects;
size_t d_objectsSize = numObjects * sizeof(hittable*);
cerr << "d_objectsSize = " << d_objectsSize << endl;
cerr << "d_worldSize = " << d_worldSize << endl;
cerr << "size of d_state = " << num_pixels * sizeof(curandState) << endl;
checkCudaError(cudaMalloc((void**)&d_objects, d_objectsSize));
// setup random state for randomScene
randInit<<<1, 1>>>(d_state2);
checkCudaError(cudaGetLastError());
checkCudaError(cudaDeviceSynchronize());
// setup randomScene
randomScene<<<1, 1>>>(d_world, d_objects, d_camera, d_state2);
checkCudaError(cudaGetLastError());
checkCudaError(cudaDeviceSynchronize());
//checkCudaError(cudaMemcpyToSymbol("d_const_world", *d_world, sizeof(hittable_list), 0, cudaMemcpyDeviceToDevice));
// configure parameters
// 30 * 20 is the best
// 32 * 32 the worst
int blockDimX = 30, blockDimY = 20;
dim3 dimBlock(blockDimX, blockDimY);
dim3 dimGrid((IMAGE_WIDTH + blockDimX - 1) / blockDimX, (IMAGE_HEIGHT + blockDimY - 1) / blockDimY);
// setup random state for render
setupRender<<<dimGrid, dimBlock>>>(d_state, IMAGE_WIDTH, IMAGE_HEIGHT);
checkCudaError(cudaGetLastError());
checkCudaError(cudaDeviceSynchronize());
// invoke render
render<<< dimGrid, dimBlock >>>(framebuffer, IMAGE_WIDTH, IMAGE_HEIGHT, samplesPerPixel, d_state, d_world, d_camera, maxDepth);
checkCudaError(cudaGetLastError());
checkCudaError(cudaDeviceSynchronize());
// using command line param to output rendered PPM image
if (OUTPUT)
{
std::cout << "P3\n" << IMAGE_WIDTH << " " << IMAGE_HEIGHT << "\n255\n";
for (int j = IMAGE_HEIGHT - 1; j >= 0; j--)
{
for (int i = 0; i < IMAGE_WIDTH; i++)
{
int index = j * IMAGE_WIDTH + i;
int ir = int(255.99 * framebuffer[index].x());
int ig = int(255.99 * framebuffer[index].y());
int ib = int(255.99 * framebuffer[index].z());
std::cout << ir << " " << ig << " " << ib << endl;
}
}
}
// clean up
deleteWorld<<<1, 1>>>(d_world, d_objects, d_camera, numObjects);
checkCudaError(cudaGetLastError());
checkCudaError(cudaDeviceSynchronize());
checkCudaError(cudaFree(d_world));
checkCudaError(cudaFree(d_objects));
checkCudaError(cudaFree(d_camera));
checkCudaError(cudaFree(d_state));
checkCudaError(cudaFree(framebuffer));
cudaDeviceReset();
return 0;
} |